author    Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-15 01:44:51 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-15 01:44:51 -0400
commit    43d2548bb2ef7e6d753f91468a746784041e522d (patch)
tree      77d13fcd48fd998393abb825ec36e2b732684a73 /drivers
parent    585583d95c5660973bc0cf64add517b040acd8a4 (diff)
parent    85082fd7cbe3173198aac0eb5e85ab1edcc6352c (diff)
Merge commit '85082fd7cbe3173198aac0eb5e85ab1edcc6352c' into test-build
Manual fixup of: arch/powerpc/Kconfig
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acorn/char/Makefile5
-rw-r--r--drivers/acorn/char/defkeymap-l7200.c386
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/base/power/trace.c2
-rw-r--r--drivers/base/topology.c25
-rw-r--r--drivers/block/DAC960.c157
-rw-r--r--drivers/block/aoe/aoechr.c7
-rw-r--r--drivers/block/aoe/aoecmd.c2
-rw-r--r--drivers/block/paride/pg.c22
-rw-r--r--drivers/block/paride/pt.c27
-rw-r--r--drivers/block/pktcdvd.c46
-rw-r--r--drivers/block/xen-blkfront.c48
-rw-r--r--drivers/bluetooth/hci_vhci.c14
-rw-r--r--drivers/cdrom/cdrom.c274
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/amd64-agp.c85
-rw-r--r--drivers/char/agp/frontend.c4
-rw-r--r--drivers/char/apm-emulation.c3
-rw-r--r--drivers/char/briq_panel.c9
-rw-r--r--drivers/char/cs5535_gpio.c2
-rw-r--r--drivers/char/drm/Makefile40
-rw-r--r--drivers/char/drm/drm.h694
-rw-r--r--drivers/char/drm/drmP.h1153
-rw-r--r--drivers/char/drm/drm_core.h34
-rw-r--r--drivers/char/drm/drm_hashtab.h67
-rw-r--r--drivers/char/drm/drm_memory.h61
-rw-r--r--drivers/char/drm/drm_memory_debug.h309
-rw-r--r--drivers/char/drm/drm_os_linux.h108
-rw-r--r--drivers/char/drm/drm_pciids.h415
-rw-r--r--drivers/char/drm/drm_sarea.h84
-rw-r--r--drivers/char/drm/drm_sman.h176
-rw-r--r--drivers/char/drm/i810_drm.h281
-rw-r--r--drivers/char/drm/i830_drm.h342
-rw-r--r--drivers/char/drm/i915_drm.h270
-rw-r--r--drivers/char/drm/mga_drm.h417
-rw-r--r--drivers/char/drm/r128_drm.h326
-rw-r--r--drivers/char/drm/radeon_drm.h749
-rw-r--r--drivers/char/drm/savage_drm.h210
-rw-r--r--drivers/char/drm/sis_drm.h67
-rw-r--r--drivers/char/drm/via_drm.h275
-rw-r--r--drivers/char/ds1286.c4
-rw-r--r--drivers/char/ds1620.c9
-rw-r--r--drivers/char/dsp56k.c16
-rw-r--r--drivers/char/dtlk.c3
-rw-r--r--drivers/char/efirtc.c2
-rw-r--r--drivers/char/genrtc.c7
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/hvc_xen.c61
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/ip2/ip2main.c34
-rw-r--r--drivers/char/ip27-rtc.c4
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c10
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c3
-rw-r--r--drivers/char/lcd.c3
-rw-r--r--drivers/char/lp.c38
-rw-r--r--drivers/char/mbcs.c5
-rw-r--r--drivers/char/mem.c10
-rw-r--r--drivers/char/misc.c3
-rw-r--r--drivers/char/mwave/mwavedd.c2
-rw-r--r--drivers/char/nvram.c4
-rw-r--r--drivers/char/pc8736x_gpio.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c118
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c23
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c1
-rw-r--r--drivers/char/ppdev.c2
-rw-r--r--drivers/char/raw.c3
-rw-r--r--drivers/char/rtc.c4
-rw-r--r--drivers/char/scx200_gpio.c2
-rw-r--r--drivers/char/snsc.c5
-rw-r--r--drivers/char/sonypi.c3
-rw-r--r--drivers/char/tb0219.c2
-rw-r--r--drivers/char/tlclk.c19
-rw-r--r--drivers/char/tpm/tpm.c5
-rw-r--r--drivers/char/tty_io.c41
-rw-r--r--drivers/char/vc_screen.c9
-rw-r--r--drivers/char/viotape.c2
-rw-r--r--drivers/char/vr41xx_giu.c2
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c6
-rw-r--r--drivers/crypto/Kconfig26
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/hifn_795x.c367
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1506
-rw-r--r--drivers/crypto/padlock-aes.c4
-rw-r--r--drivers/crypto/padlock-sha.c4
-rw-r--r--drivers/crypto/talitos.c1597
-rw-r--r--drivers/crypto/talitos.h199
-rw-r--r--drivers/firmware/Kconfig10
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/dmi_scan.c5
-rw-r--r--drivers/firmware/memmap.c205
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig (renamed from drivers/char/drm/Kconfig)0
-rw-r--r--drivers/gpu/drm/Makefile26
-rw-r--r--drivers/gpu/drm/README.drm (renamed from drivers/char/drm/README.drm)0
-rw-r--r--drivers/gpu/drm/ati_pcigart.c (renamed from drivers/char/drm/ati_pcigart.c)0
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c (renamed from drivers/char/drm/drm_agpsupport.c)0
-rw-r--r--drivers/gpu/drm/drm_auth.c (renamed from drivers/char/drm/drm_auth.c)0
-rw-r--r--drivers/gpu/drm/drm_bufs.c (renamed from drivers/char/drm/drm_bufs.c)0
-rw-r--r--drivers/gpu/drm/drm_context.c (renamed from drivers/char/drm/drm_context.c)0
-rw-r--r--drivers/gpu/drm/drm_dma.c (renamed from drivers/char/drm/drm_dma.c)0
-rw-r--r--drivers/gpu/drm/drm_drawable.c (renamed from drivers/char/drm/drm_drawable.c)0
-rw-r--r--drivers/gpu/drm/drm_drv.c (renamed from drivers/char/drm/drm_drv.c)0
-rw-r--r--drivers/gpu/drm/drm_fops.c (renamed from drivers/char/drm/drm_fops.c)9
-rw-r--r--drivers/gpu/drm/drm_hashtab.c (renamed from drivers/char/drm/drm_hashtab.c)0
-rw-r--r--drivers/gpu/drm/drm_ioc32.c (renamed from drivers/char/drm/drm_ioc32.c)0
-rw-r--r--drivers/gpu/drm/drm_ioctl.c (renamed from drivers/char/drm/drm_ioctl.c)0
-rw-r--r--drivers/gpu/drm/drm_irq.c (renamed from drivers/char/drm/drm_irq.c)0
-rw-r--r--drivers/gpu/drm/drm_lock.c (renamed from drivers/char/drm/drm_lock.c)0
-rw-r--r--drivers/gpu/drm/drm_memory.c (renamed from drivers/char/drm/drm_memory.c)0
-rw-r--r--drivers/gpu/drm/drm_mm.c (renamed from drivers/char/drm/drm_mm.c)0
-rw-r--r--drivers/gpu/drm/drm_pci.c (renamed from drivers/char/drm/drm_pci.c)0
-rw-r--r--drivers/gpu/drm/drm_proc.c (renamed from drivers/char/drm/drm_proc.c)0
-rw-r--r--drivers/gpu/drm/drm_scatter.c (renamed from drivers/char/drm/drm_scatter.c)0
-rw-r--r--drivers/gpu/drm/drm_sman.c (renamed from drivers/char/drm/drm_sman.c)0
-rw-r--r--drivers/gpu/drm/drm_stub.c (renamed from drivers/char/drm/drm_stub.c)0
-rw-r--r--drivers/gpu/drm/drm_sysfs.c (renamed from drivers/char/drm/drm_sysfs.c)0
-rw-r--r--drivers/gpu/drm/drm_vm.c (renamed from drivers/char/drm/drm_vm.c)0
-rw-r--r--drivers/gpu/drm/i810/Makefile8
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c (renamed from drivers/char/drm/i810_dma.c)0
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c (renamed from drivers/char/drm/i810_drv.c)0
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h (renamed from drivers/char/drm/i810_drv.h)0
-rw-r--r--drivers/gpu/drm/i830/Makefile8
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c (renamed from drivers/char/drm/i830_dma.c)0
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c (renamed from drivers/char/drm/i830_drv.c)0
-rw-r--r--drivers/gpu/drm/i830/i830_drv.h (renamed from drivers/char/drm/i830_drv.h)0
-rw-r--r--drivers/gpu/drm/i830/i830_irq.c (renamed from drivers/char/drm/i830_irq.c)0
-rw-r--r--drivers/gpu/drm/i915/Makefile10
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c (renamed from drivers/char/drm/i915_dma.c)0
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c (renamed from drivers/char/drm/i915_drv.c)0
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h (renamed from drivers/char/drm/i915_drv.h)0
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c (renamed from drivers/char/drm/i915_ioc32.c)0
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c (renamed from drivers/char/drm/i915_irq.c)0
-rw-r--r--drivers/gpu/drm/i915/i915_mem.c (renamed from drivers/char/drm/i915_mem.c)0
-rw-r--r--drivers/gpu/drm/mga/Makefile11
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c (renamed from drivers/char/drm/mga_dma.c)0
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c (renamed from drivers/char/drm/mga_drv.c)0
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h (renamed from drivers/char/drm/mga_drv.h)0
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c (renamed from drivers/char/drm/mga_ioc32.c)0
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c (renamed from drivers/char/drm/mga_irq.c)0
-rw-r--r--drivers/gpu/drm/mga/mga_state.c (renamed from drivers/char/drm/mga_state.c)0
-rw-r--r--drivers/gpu/drm/mga/mga_ucode.h (renamed from drivers/char/drm/mga_ucode.h)0
-rw-r--r--drivers/gpu/drm/mga/mga_warp.c (renamed from drivers/char/drm/mga_warp.c)0
-rw-r--r--drivers/gpu/drm/r128/Makefile10
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c (renamed from drivers/char/drm/r128_cce.c)0
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c (renamed from drivers/char/drm/r128_drv.c)0
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h (renamed from drivers/char/drm/r128_drv.h)0
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c (renamed from drivers/char/drm/r128_ioc32.c)0
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c (renamed from drivers/char/drm/r128_irq.c)0
-rw-r--r--drivers/gpu/drm/r128/r128_state.c (renamed from drivers/char/drm/r128_state.c)0
-rw-r--r--drivers/gpu/drm/radeon/Makefile10
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c (renamed from drivers/char/drm/r300_cmdbuf.c)0
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h (renamed from drivers/char/drm/r300_reg.h)0
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c (renamed from drivers/char/drm/radeon_cp.c)0
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c (renamed from drivers/char/drm/radeon_drv.c)0
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h (renamed from drivers/char/drm/radeon_drv.h)0
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c (renamed from drivers/char/drm/radeon_ioc32.c)0
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c (renamed from drivers/char/drm/radeon_irq.c)0
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c (renamed from drivers/char/drm/radeon_mem.c)0
-rw-r--r--drivers/gpu/drm/radeon/radeon_microcode.h (renamed from drivers/char/drm/radeon_microcode.h)0
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c (renamed from drivers/char/drm/radeon_state.c)0
-rw-r--r--drivers/gpu/drm/savage/Makefile9
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c (renamed from drivers/char/drm/savage_bci.c)0
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c (renamed from drivers/char/drm/savage_drv.c)0
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h (renamed from drivers/char/drm/savage_drv.h)0
-rw-r--r--drivers/gpu/drm/savage/savage_state.c (renamed from drivers/char/drm/savage_state.c)0
-rw-r--r--drivers/gpu/drm/sis/Makefile10
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c (renamed from drivers/char/drm/sis_drv.c)0
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h (renamed from drivers/char/drm/sis_drv.h)0
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c (renamed from drivers/char/drm/sis_mm.c)0
-rw-r--r--drivers/gpu/drm/tdfx/Makefile8
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c (renamed from drivers/char/drm/tdfx_drv.c)0
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.h (renamed from drivers/char/drm/tdfx_drv.h)0
-rw-r--r--drivers/gpu/drm/via/Makefile8
-rw-r--r--drivers/gpu/drm/via/via_3d_reg.h (renamed from drivers/char/drm/via_3d_reg.h)0
-rw-r--r--drivers/gpu/drm/via/via_dma.c (renamed from drivers/char/drm/via_dma.c)0
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c (renamed from drivers/char/drm/via_dmablit.c)0
-rw-r--r--drivers/gpu/drm/via/via_dmablit.h (renamed from drivers/char/drm/via_dmablit.h)0
-rw-r--r--drivers/gpu/drm/via/via_drv.c (renamed from drivers/char/drm/via_drv.c)0
-rw-r--r--drivers/gpu/drm/via/via_drv.h (renamed from drivers/char/drm/via_drv.h)0
-rw-r--r--drivers/gpu/drm/via/via_irq.c (renamed from drivers/char/drm/via_irq.c)0
-rw-r--r--drivers/gpu/drm/via/via_map.c (renamed from drivers/char/drm/via_map.c)0
-rw-r--r--drivers/gpu/drm/via/via_mm.c (renamed from drivers/char/drm/via_mm.c)0
-rw-r--r--drivers/gpu/drm/via/via_verifier.c (renamed from drivers/char/drm/via_verifier.c)0
-rw-r--r--drivers/gpu/drm/via/via_verifier.h (renamed from drivers/char/drm/via_verifier.h)0
-rw-r--r--drivers/gpu/drm/via/via_video.c (renamed from drivers/char/drm/via_video.c)0
-rw-r--r--drivers/hid/hidraw.c3
-rw-r--r--drivers/i2c/busses/i2c-pxa.c30
-rw-r--r--drivers/i2c/chips/isp1301_omap.c163
-rw-r--r--drivers/i2c/i2c-dev.c22
-rw-r--r--drivers/ide/ide-tape.c7
-rw-r--r--drivers/ide/legacy/ide-cs.c10
-rw-r--r--drivers/infiniband/core/ucm.c2
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/user_mad.c13
-rw-r--r--drivers/infiniband/core/uverbs_main.c13
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c2
-rw-r--r--drivers/input/input.c16
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c2
-rw-r--r--drivers/input/misc/uinput.c3
-rw-r--r--drivers/input/mousedev.c12
-rw-r--r--drivers/input/serio/serio_raw.c6
-rw-r--r--drivers/input/xen-kbdfront.c20
-rw-r--r--drivers/isdn/capi/capi.c17
-rw-r--r--drivers/isdn/hardware/eicon/divamnt.c16
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/i4l/isdn_common.c3
-rw-r--r--drivers/lguest/Kconfig2
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/macintosh/adb.c18
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/macintosh/via-pmu.c3
-rw-r--r--drivers/md/linear.c10
-rw-r--r--drivers/md/raid0.c10
-rw-r--r--drivers/md/raid10.c15
-rw-r--r--drivers/md/raid5.c10
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c4
-rw-r--r--drivers/media/radio/miropcm20-rds.c4
-rw-r--r--drivers/media/video/videodev.c4
-rw-r--r--drivers/message/fusion/mptctl.c6
-rw-r--r--drivers/message/i2o/i2o_config.c12
-rw-r--r--drivers/misc/atmel_pwm.c2
-rw-r--r--drivers/misc/hdpuftrs/hdpu_cpustate.c9
-rw-r--r--drivers/misc/phantom.c9
-rw-r--r--drivers/misc/sony-laptop.c3
-rw-r--r--drivers/mmc/host/imxmmc.c23
-rw-r--r--drivers/mtd/ftl.c4
-rw-r--r--drivers/mtd/maps/omap_nor.c23
-rw-r--r--drivers/mtd/maps/pcmciamtd.c9
-rw-r--r--drivers/mtd/mtdchar.c22
-rw-r--r--drivers/mtd/nand/orion_nand.c3
-rw-r--r--drivers/mtd/ubi/cdev.c7
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arm/etherh.c2
-rw-r--r--drivers/net/irda/pxaficp_ir.c24
-rw-r--r--drivers/net/macb.c37
-rw-r--r--drivers/net/ppp_generic.c2
-rw-r--r--drivers/net/tun.c13
-rw-r--r--drivers/net/wan/cosa.c22
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--drivers/parisc/eisa_eeprom.c3
-rw-r--r--drivers/pci/intel-iommu.c51
-rw-r--r--drivers/pcmcia/Kconfig7
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/au1000_generic.h27
-rw-r--r--drivers/pcmcia/au1000_pb1x00.c1
-rw-r--r--drivers/pcmcia/au1000_xxs1500.c1
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c339
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/cistpl.c16
-rw-r--r--drivers/pcmcia/cs.c14
-rw-r--r--drivers/pcmcia/cs_internal.h13
-rw-r--r--drivers/pcmcia/ds.c12
-rw-r--r--drivers/pcmcia/hd64465_ss.c3
-rw-r--r--drivers/pcmcia/i82092.c2
-rw-r--r--drivers/pcmcia/i82092aa.h2
-rw-r--r--drivers/pcmcia/i82365.c39
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c3
-rw-r--r--drivers/pcmcia/omap_cf.c25
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c179
-rw-r--r--drivers/pcmcia/pcmcia_resource.c81
-rw-r--r--drivers/pcmcia/pxa2xx_base.c1
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c15
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c13
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c12
-rw-r--r--drivers/pcmcia/rsrc_mgr.c86
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c57
-rw-r--r--drivers/pcmcia/soc_common.h1
-rw-r--r--drivers/pcmcia/socket_sysfs.c8
-rw-r--r--drivers/pcmcia/ti113x.h4
-rw-r--r--drivers/rtc/Kconfig19
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-at32ap700x.c3
-rw-r--r--drivers/rtc/rtc-at91rm9200.c4
-rw-r--r--drivers/rtc/rtc-at91sam9.c1
-rw-r--r--drivers/rtc/rtc-dev.c12
-rw-r--r--drivers/rtc/rtc-m41t80.c7
-rw-r--r--drivers/rtc/rtc-omap.c1
-rw-r--r--drivers/rtc/rtc-pl030.c217
-rw-r--r--drivers/rtc/rtc-pl031.c36
-rw-r--r--drivers/rtc/rtc-s3c.c4
-rw-r--r--drivers/rtc/rtc-sa1100.c37
-rw-r--r--drivers/s390/block/dasd.c18
-rw-r--r--drivers/s390/block/dasd_3990_erp.c15
-rw-r--r--drivers/s390/block/dasd_eckd.c12
-rw-r--r--drivers/s390/block/dasd_eer.c6
-rw-r--r--drivers/s390/block/dasd_fba.c12
-rw-r--r--drivers/s390/block/dcssblk.c22
-rw-r--r--drivers/s390/block/xpram.c18
-rw-r--r--drivers/s390/char/con3215.c38
-rw-r--r--drivers/s390/char/con3270.c6
-rw-r--r--drivers/s390/char/fs3270.c34
-rw-r--r--drivers/s390/char/monreader.c78
-rw-r--r--drivers/s390/char/monwriter.c3
-rw-r--r--drivers/s390/char/raw3270.c28
-rw-r--r--drivers/s390/char/sclp.c12
-rw-r--r--drivers/s390/char/sclp_cmd.c343
-rw-r--r--drivers/s390/char/sclp_con.c5
-rw-r--r--drivers/s390/char/sclp_config.c17
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c57
-rw-r--r--drivers/s390/char/sclp_quiesce.c8
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/char/sclp_sdias.c4
-rw-r--r--drivers/s390/char/sclp_tty.c261
-rw-r--r--drivers/s390/char/sclp_tty.h53
-rw-r--r--drivers/s390/char/sclp_vt220.c62
-rw-r--r--drivers/s390/char/tape_34xx.c12
-rw-r--r--drivers/s390/char/tape_3590.c21
-rw-r--r--drivers/s390/char/tape_char.c12
-rw-r--r--drivers/s390/char/tape_core.c15
-rw-r--r--drivers/s390/char/tty3270.c9
-rw-r--r--drivers/s390/char/vmcp.c41
-rw-r--r--drivers/s390/char/vmlogrdr.c37
-rw-r--r--drivers/s390/char/vmur.c17
-rw-r--r--drivers/s390/char/vmwatchdog.c23
-rw-r--r--drivers/s390/char/zcore.c31
-rw-r--r--drivers/s390/cio/Makefile4
-rw-r--r--drivers/s390/cio/airq.c45
-rw-r--r--drivers/s390/cio/chp.c116
-rw-r--r--drivers/s390/cio/chp.h15
-rw-r--r--drivers/s390/cio/chsc.c379
-rw-r--r--drivers/s390/cio/chsc.h26
-rw-r--r--drivers/s390/cio/chsc_sch.c820
-rw-r--r--drivers/s390/cio/chsc_sch.h13
-rw-r--r--drivers/s390/cio/cio.c282
-rw-r--r--drivers/s390/cio/cio.h14
-rw-r--r--drivers/s390/cio/cmf.c20
-rw-r--r--drivers/s390/cio/css.c283
-rw-r--r--drivers/s390/cio/css.h49
-rw-r--r--drivers/s390/cio/device.c476
-rw-r--r--drivers/s390/cio/device.h7
-rw-r--r--drivers/s390/cio/device_fsm.c210
-rw-r--r--drivers/s390/cio/device_id.c16
-rw-r--r--drivers/s390/cio/device_ops.c134
-rw-r--r--drivers/s390/cio/device_pgid.c26
-rw-r--r--drivers/s390/cio/device_status.c133
-rw-r--r--drivers/s390/cio/fcx.c350
-rw-r--r--drivers/s390/cio/idset.h2
-rw-r--r--drivers/s390/cio/io_sch.h48
-rw-r--r--drivers/s390/cio/ioasm.h2
-rw-r--r--drivers/s390/cio/isc.c68
-rw-r--r--drivers/s390/cio/itcw.c327
-rw-r--r--drivers/s390/cio/qdio.c35
-rw-r--r--drivers/s390/cio/qdio.h6
-rw-r--r--drivers/s390/cio/schid.h26
-rw-r--r--drivers/s390/cio/scsw.c843
-rw-r--r--drivers/s390/crypto/ap_bus.c63
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c27
-rw-r--r--drivers/s390/crypto/zcrypt_api.h28
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c4
-rw-r--r--drivers/s390/crypto/zcrypt_error.h6
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c3
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c15
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c69
-rw-r--r--drivers/s390/net/claw.c77
-rw-r--r--drivers/s390/net/ctcm_fsms.c12
-rw-r--r--drivers/s390/net/ctcm_main.c28
-rw-r--r--drivers/s390/net/cu3088.c2
-rw-r--r--drivers/s390/net/cu3088.h3
-rw-r--r--drivers/s390/net/lcs.c44
-rw-r--r--drivers/s390/net/netiucv.c61
-rw-r--r--drivers/s390/net/qeth_core_main.c15
-rw-r--r--drivers/s390/net/qeth_l3_main.c9
-rw-r--r--drivers/s390/net/smsgiucv.c10
-rw-r--r--drivers/s390/s390mach.c106
-rw-r--r--drivers/s390/s390mach.h10
-rw-r--r--drivers/sbus/char/bpp.c3
-rw-r--r--drivers/sbus/char/cpwatchdog.c4
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/sbus/char/envctrl.c2
-rw-r--r--drivers/sbus/char/flash.c6
-rw-r--r--drivers/sbus/char/jsflash.c13
-rw-r--r--drivers/sbus/char/openprom.c3
-rw-r--r--drivers/sbus/char/riowatchdog.c2
-rw-r--r--drivers/sbus/char/rtc.c3
-rw-r--r--drivers/sbus/char/uctrl.c3
-rw-r--r--drivers/sbus/char/vfc_dev.c5
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-xxxx.c3
-rw-r--r--drivers/scsi/aacraid/linit.c3
-rw-r--r--drivers/scsi/arm/Kconfig2
-rw-r--r--drivers/scsi/arm/acornscsi-io.S15
-rw-r--r--drivers/scsi/arm/acornscsi.c426
-rw-r--r--drivers/scsi/arm/acornscsi.h9
-rw-r--r--drivers/scsi/ch.c4
-rw-r--r--drivers/scsi/dpt_i2o.c5
-rw-r--r--drivers/scsi/gdth.c3
-rw-r--r--drivers/scsi/megaraid.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c2
-rw-r--r--drivers/scsi/osst.c15
-rw-r--r--drivers/scsi/scsi_tgt_if.c2
-rw-r--r--drivers/scsi/sg.c60
-rw-r--r--drivers/scsi/sr.c20
-rw-r--r--drivers/scsi/st.c11
-rw-r--r--drivers/serial/Kconfig56
-rw-r--r--drivers/serial/Makefile4
-rw-r--r--drivers/serial/atmel_serial.c17
-rw-r--r--drivers/serial/imx.c318
-rw-r--r--drivers/serial/s3c2400.c106
-rw-r--r--drivers/serial/s3c2410.c1860
-rw-r--r--drivers/serial/s3c2412.c151
-rw-r--r--drivers/serial/s3c2440.c181
-rw-r--r--drivers/serial/samsung.c1317
-rw-r--r--drivers/serial/samsung.h102
-rw-r--r--drivers/spi/spi_imx.c54
-rw-r--r--drivers/spi/spidev.c3
-rw-r--r--drivers/telephony/phonedev.c3
-rw-r--r--drivers/uio/uio.c17
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/core/file.c3
-rw-r--r--drivers/usb/gadget/Kconfig16
-rw-r--r--drivers/usb/gadget/Makefile2
-rw-r--r--drivers/usb/gadget/at91_udc.c4
-rw-r--r--drivers/usb/gadget/ether.c2
-rw-r--r--drivers/usb/gadget/gadget_chips.h4
-rw-r--r--drivers/usb/gadget/inode.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c510
-rw-r--r--drivers/usb/gadget/omap_udc.h61
-rw-r--r--drivers/usb/gadget/printer.c3
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c (renamed from drivers/usb/gadget/pxa2xx_udc.c)309
-rw-r--r--drivers/usb/gadget/pxa25x_udc.h (renamed from drivers/usb/gadget/pxa2xx_udc.h)29
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c9
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h8
-rw-r--r--drivers/usb/host/ohci-omap.c5
-rw-r--r--drivers/usb/host/ohci-pxa27x.c3
-rw-r--r--drivers/usb/mon/mon_bin.c6
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/pwm_bl.c185
-rw-r--r--drivers/video/fbmem.c15
-rw-r--r--drivers/video/pxafb.c44
-rw-r--r--drivers/video/sgivwfb.c3
-rw-r--r--drivers/video/xen-fbfront.c211
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/balloon.c10
-rw-r--r--drivers/xen/events.c114
-rw-r--r--drivers/xen/grant-table.c4
-rw-r--r--drivers/xen/manage.c252
-rw-r--r--drivers/xen/xenbus/xenbus_client.c2
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c23
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c10
447 files changed, 14542 insertions, 12627 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index f65deda72d61..fda44679dffc 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/
 obj-$(CONFIG_PARISC) += parisc/
 obj-$(CONFIG_RAPIDIO) += rapidio/
 obj-y += video/
+obj-y += gpu/
 obj-$(CONFIG_ACPI) += acpi/
 # PnP must come after ACPI since it will eventually need to check if acpi
 # was used and do nothing if so
diff --git a/drivers/acorn/char/Makefile b/drivers/acorn/char/Makefile
deleted file mode 100644
index d006c9f168d2..000000000000
--- a/drivers/acorn/char/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the acorn character device drivers.
-#
-
-obj-$(CONFIG_L7200_KEYB) += defkeymap-l7200.o keyb_l7200.o
diff --git a/drivers/acorn/char/defkeymap-l7200.c b/drivers/acorn/char/defkeymap-l7200.c
deleted file mode 100644
index 93d80a1c36f9..000000000000
--- a/drivers/acorn/char/defkeymap-l7200.c
+++ /dev/null
@@ -1,386 +0,0 @@
1/*
2 * linux/drivers/acorn/char/defkeymap-l7200.c
3 *
4 * Default keyboard maps for LinkUp Systems L7200 board
5 *
6 * Copyright (C) 2000 Steve Hill (sjhill@cotw.com)
7 *
8 * Changelog:
9 * 08-04-2000 SJH Created file
10 */
11
12#include <linux/types.h>
13#include <linux/keyboard.h>
14#include <linux/kd.h>
15
16/* Normal (maps 1:1 with no processing) */
17#define KTn 0xF0
18/* Function keys */
19#define KTf 0xF1
20/* Special (Performs special house-keeping funcs) */
21#define KTs 0xF2
22#define KIGNORE K(KTs, 0) /* Ignore */
23#define KENTER K(KTs, 1) /* Enter */
24#define KREGS K(KTs, 2) /* Regs */
25#define KMEM K(KTs, 3) /* Mem */
26#define KSTAT K(KTs, 4) /* State */
27#define KINTR K(KTs, 5) /* Intr */
28#define Ksl 6 /* Last console */
29#define KCAPSLK K(KTs, 7) /* Caps lock */
30#define KNUMLK K(KTs, 8) /* Num-lock */
31#define KSCRLLK K(KTs, 9) /* Scroll-lock */
32#define KSCRLFOR K(KTs,10) /* Scroll forward */
33#define KSCRLBAK K(KTs,11) /* Scroll back */
34#define KREBOOT K(KTs,12) /* Reboot */
35#define KCAPSON K(KTs,13) /* Caps on */
36#define KCOMPOSE K(KTs,14) /* Compose */
37#define KSAK K(KTs,15) /* SAK */
38#define CONS_DEC K(KTs,16) /* Dec console */
39#define CONS_INC K(KTs,17) /* Incr console */
40#define KFLOPPY K(KTs,18) /* Floppy */
41/* Key pad (0-9 = digits, 10=+, 11=-, 12=*, 13=/, 14=enter, 16=., 17=# */
42#define KTp 0xF3
43#define KPAD_0 K(KTp, 0 )
44#define KPAD_1 K(KTp, 1 )
45#define KPAD_2 K(KTp, 2 )
46#define KPAD_3 K(KTp, 3 )
47#define KPAD_4 K(KTp, 4 )
48#define KPAD_5 K(KTp, 5 )
49#define KPAD_6 K(KTp, 6 )
50#define KPAD_7 K(KTp, 7 )
51#define KPAD_8 K(KTp, 8 )
52#define KPAD_9 K(KTp, 9 )
53#define KPAD_PL K(KTp,10 )
54#define KPAD_MI K(KTp,11 )
55#define KPAD_ML K(KTp,12 )
56#define KPAD_DV K(KTp,13 )
57#define KPAD_EN K(KTp,14 )
58#define KPAD_DT K(KTp,16 )
59#define KPAD_HS K(KTp,20 )
60/* Console switching */
61#define KCn 0xF5
62/* Cursor */
63#define KTc 0xF6
64#define Kcd 0 /* Cursor down */
65#define Kcl 1 /* Cursor left */
66#define Kcr 2 /* Cursor right */
67#define Kcu 3 /* Cursor up */
68/* Shift/alt modifiers etc */
69#define KMd 0xF7
70#define KSHIFT K(KMd, 0 )
71#define KALTGR K(KMd, 1 )
72#define KCTRL K(KMd, 2 )
73#define KALT K(KMd, 3 )
74/* Meta */
75#define KMt 0xF8
76#define KAs 0xF9
77#define KPADA_0 K(KAs, 0 )
78#define KPADA_1 K(KAs, 1 )
79#define KPADA_2 K(KAs, 2 )
80#define KPADA_3 K(KAs, 3 )
81#define KPADA_4 K(KAs, 4 )
82#define KPADA_5 K(KAs, 5 )
83#define KPADA_6 K(KAs, 6 )
84#define KPADA_7 K(KAs, 7 )
85#define KPADA_8 K(KAs, 8 )
86#define KPADA_9 K(KAs, 9 )
87#define KPADB_0 K(KAs,10 )
88#define KPADB_1 K(KAs,11 )
89#define KPADB_2 K(KAs,12 )
90#define KPADB_3 K(KAs,13 )
91#define KPADB_4 K(KAs,14 )
92#define KPADB_5 K(KAs,15 )
93#define KPADB_6 K(KAs,16 )
94#define KPADB_7 K(KAs,17 )
95#define KPADB_8 K(KAs,18 )
96#define KPADB_9 K(KAs,19 )
97/* Locking keys */
98#define KLk 0xFA
99/* Letters */
100#define KTl 0xFB
101
102/*
103 * Here is the layout of the keys for the Fujitsu QWERTY
104 * style keyboard:
105 *
106 * static char Fujitsu_Key_Table[] =
107 * {
108 * KALT, '`' , KNUL, KCTL, KFUN, KESC, '1' , '2' ,
109 * '9' , '0' , '-' , '=' , KNUL, KBSP, KNUL, KNUL,
110 * KNUL, KBSL, KSHF, KNUL, KNUL, KDEL, KNUL, 't' ,
111 * 'y' , 'u' , 'i' , KRET, KSHF, KPGD, KNUL, KNUL,
112 * KNUL, KTAB, KNUL, KNUL, KNUL, 'q' , 'w' , 'e' ,
113 * 'r' , 'o' , 'p' , '[' , KNUL, ']' , KNUL, KNUL,
114 * KNUL, 'z' , KNUL, KNUL, KNUL, KSHL, KNUL, KNUL,
115 * 'k' , 'l' , ';' , KSQT, KNUL, KPGU, KNUL, KNUL,
116 * KNUL, 'a' , KNUL, KNUL, KNUL, 's' , 'd' , 'f' ,
117 * 'g' , 'h' , 'j' , '/' , KNUL, KHME, KNUL, KNUL,
118 * KNUL, 'x' , KNUL, KNUL, KNUL, 'c' , 'v' , 'b' ,
119 * 'n' , 'm' , ',' , '.' , KNUL, ' ' , KNUL, KNUL,
120 * KNUL, KNUL, KNUL, KNUL, KNUL, '3' , '4' , '5' ,
121 * '6' , '7' , '8' , KNUL, KPRG, KNUL, KEND, KNUL,
122 * };
123 */
124
125u_short plain_map[NR_KEYS]=
126{
127 0xf703, 0xf060, 0xf200, 0xf702, 0xf200, 0xf01b, 0xf031, 0xf032,
128 0xf039, 0xf030, 0xf02d, 0xf03d, 0xf200, 0xf07f, 0xf200, 0xf200,
129 0xf200, 0xf05c, 0xf700, 0xf200, 0xf200, 0xf116, 0xf000, 0xfb74,
130 0xfb79, 0xfb75, 0xfb69, 0xf201, 0xf700, 0xf600, 0xf200, 0xf200,
131 0xf200, 0xf009, 0xf200, 0xf200, 0xf200, 0xfb71, 0xfb77, 0xfb65,
132 0xfb72, 0xfb6f, 0xfb70, 0xf05b, 0xf200, 0xf05d, 0xf200, 0xf200,
133 0xf200, 0xfb7a, 0xf200, 0xf200, 0xf200, 0xf207, 0xf200, 0xf200,
134 0xfb6b, 0xfb6c, 0xf03b, 0xf027, 0xf200, 0xf603, 0xf200, 0xf200,
135 0xf200, 0xfb61, 0xf200, 0xf200, 0xf200, 0xfb73, 0xfb64, 0xfb66,
136 0xfb67, 0xfb68, 0xfb6a, 0xf02f, 0xf200, 0xf601, 0xf200, 0xf200,
137 0xf200, 0xfb78, 0xf200, 0xf200, 0xf200, 0xfb63, 0xfb76, 0xfb62,
138 0xfb6e, 0xfb6d, 0xf02c, 0xf02e, 0xf200, 0xf020, 0xf200, 0xf200,
139 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf033, 0xf034, 0xf035,
140 0xf036, 0xf037, 0xf038, 0xf200, 0xf200, 0xf200, 0xf602, 0xf200,
141 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
142 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
143};
144
145u_short shift_map[NR_KEYS]=
146{
147 0xf703, 0xf07e, 0xf200, 0xf702, 0xf200, 0xf01b, 0xf021, 0xf040,
148 0xf028, 0xf029, 0xf05f, 0xf02b, 0xf200, 0xf07f, 0xf200, 0xf200,
149 0xf200, 0xf07c, 0xf700, 0xf200, 0xf200, 0xf116, 0xf000, 0xfb54,
150 0xfb59, 0xfb55, 0xfb49, 0xf201, 0xf700, 0xf600, 0xf200, 0xf200,
151 0xf200, 0xf009, 0xf200, 0xf200, 0xf200, 0xfb51, 0xfb57, 0xfb45,
152 0xfb52, 0xfb4f, 0xfb50, 0xf07b, 0xf200, 0xf07d, 0xf200, 0xf200,
153 0xf200, 0xfb5a, 0xf200, 0xf200, 0xf200, 0xf207, 0xf200, 0xf200,
154 0xfb4b, 0xfb4c, 0xf03a, 0xf022, 0xf200, 0xf603, 0xf200, 0xf200,
155 0xf200, 0xfb41, 0xf200, 0xf200, 0xf200, 0xfb53, 0xfb44, 0xfb46,
156 0xfb47, 0xfb48, 0xfb4a, 0xf03f, 0xf200, 0xf601, 0xf200, 0xf200,
157 0xf200, 0xfb58, 0xf200, 0xf200, 0xf200, 0xfb43, 0xfb56, 0xfb42,
158 0xfb4e, 0xfb4d, 0xf03c, 0xf03e, 0xf200, 0xf020, 0xf200, 0xf200,
159 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf023, 0xf024, 0xf025,
160 0xf05e, 0xf026, 0xf02a, 0xf200, 0xf200, 0xf200, 0xf602, 0xf200,
161 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
162 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
163};
164
165u_short altgr_map[NR_KEYS]=
166{
167 KIGNORE ,K(KCn,12 ),K(KCn,13 ),K(KCn,14 ),K(KCn,15 ),K(KCn,16 ),K(KCn,17 ),K(KCn, 18),
168 K(KCn, 19),K(KCn,20 ),K(KCn,21 ),K(KCn,22 ),K(KCn,23 ),KIGNORE ,KREGS ,KINTR ,
169 KIGNORE ,KIGNORE ,K(KTn,'@'),KIGNORE ,K(KTn,'$'),KIGNORE ,KIGNORE ,K(KTn,'{'),
170 K(KTn,'['),K(KTn,']'),K(KTn,'}'),K(KTn,'\\'),KIGNORE ,KIGNORE ,KIGNORE ,K(KTf,21 ),
171 K(KTf,20 ),K(KTf,24 ),KNUMLK ,KPAD_DV ,KPAD_ML ,KPAD_HS ,KIGNORE ,K(KTl,'q'),
172 K(KTl,'w'),K(KTl,'e'),K(KTl,'r'),K(KTl,'t' ),K(KTl,'y'),K(KTl,'u'),K(KTl,'i' ),K(KTl,'o'),
173 K(KTl,'p'),KIGNORE ,K(KTn,'~'),KIGNORE ,K(KTf,22 ),K(KTf,23 ),K(KTf,25 ),KPADB_7 ,
174 KPADB_8 ,KPADB_9 ,KPAD_MI ,KCTRL ,K(KAs,20 ),K(KTl,'s'),K(KAs,23 ),K(KAs,25 ),
175 K(KTl,'g'),K(KTl,'h'),K(KTl,'j'),K(KTl,'k' ),K(KTl,'l'),KIGNORE ,KIGNORE ,KENTER ,
176 KPADB_4 ,KPADB_5 ,KPADB_6 ,KPAD_PL ,KSHIFT ,KIGNORE ,K(KTl,'z' ),K(KTl,'x'),
177 K(KAs,22 ),K(KTl,'v'),K(KTl,21 ),K(KTl,'n' ),K(KTl,'m'),KIGNORE ,KIGNORE ,KIGNORE ,
178 KSHIFT ,K(KTc,Kcu),KPADB_1 ,KPADB_2 ,KPADB_3 ,KCAPSLK ,KALT ,KIGNORE ,
179 KALTGR ,KCTRL ,K(KTc,Kcl),K(KTc,Kcd ),K(KTc,Kcr),KPADB_0 ,KPAD_DT ,KPAD_EN ,
180 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
181 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
182 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
183};
184
185u_short ctrl_map[NR_KEYS]=
186{
187 0xf703, 0xf200, 0xf200, 0xf702, 0xf200, 0xf200, 0xf001, 0xf002,
188 0xf009, 0xf000, 0xf031, 0xf200, 0xf200, 0xf07f, 0xf200, 0xf200,
189 0xf200, 0xf01c, 0xf700, 0xf200, 0xf200, 0xf116, 0xf000, 0xf020,
190 0xf019, 0xf015, 0xf009, 0xf201, 0xf700, 0xf600, 0xf200, 0xf200,
191 0xf200, 0xf009, 0xf200, 0xf200, 0xf200, 0xf011, 0xf017, 0xf005,
192 0xf012, 0xf00f, 0xf010, 0xf01b, 0xf200, 0xf01d, 0xf200, 0xf200,
193 0xf200, 0xf01a, 0xf200, 0xf200, 0xf200, 0xf207, 0xf200, 0xf200,
194 0xf00b, 0xf00c, 0xf200, 0xf007, 0xf200, 0xf603, 0xf200, 0xf200,
195 0xf200, 0xf001, 0xf200, 0xf200, 0xf200, 0xf001, 0xf013, 0xf006,
196 0xf007, 0xf008, 0xf00a, 0xf07f, 0xf200, 0xf601, 0xf200, 0xf200,
197 0xf200, 0xf018, 0xf200, 0xf200, 0xf200, 0xf003, 0xf016, 0xf002,
198 0xf00e, 0xf00d, 0xf200, 0xf200, 0xf200, 0xf000, 0xf200, 0xf200,
199 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf01b, 0xf01c, 0xf01d,
200 0xf036, 0xf037, 0xf038, 0xf200, 0xf200, 0xf200, 0xf602, 0xf200,
201 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
202 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf602, 0xf200,
203};
204
205u_short shift_ctrl_map[NR_KEYS]=
206{
207 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
208 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KFLOPPY ,KINTR ,
209 KIGNORE ,KIGNORE ,K(KTn, 0 ),KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
210 KIGNORE ,KIGNORE ,KIGNORE ,K(KTn,31 ),KIGNORE ,KIGNORE ,KIGNORE ,K(KTf,21 ),
211 K(KTf,20 ),K(KTf,24 ),KNUMLK ,KPAD_DV ,KPAD_ML ,KPAD_HS ,KIGNORE ,K(KTn,17 ),
212 K(KTn,23 ),K(KTn, 5 ),K(KTn,18 ),K(KTn,20 ),K(KTn,25 ),K(KTn,21 ),K(KTn, 9 ),K(KTn,15 ),
213 K(KTn,16 ),KIGNORE ,KIGNORE ,KIGNORE ,K(KTf,22 ),K(KTf,23 ),K(KTf,25 ),KPAD_7 ,
214 KPAD_8 ,KPAD_9 ,KPAD_MI ,KCTRL ,K(KTn, 1 ),K(KTn,19 ),K(KTn, 4 ),K(KTn, 6 ),
215 K(KTn, 7 ),K(KTn, 8 ),K(KTn,10 ),K(KTn,11 ),K(KTn,12 ),KIGNORE ,K(KTn, 7 ),KENTER ,
216 KPAD_4 ,KPAD_5 ,KPAD_6 ,KPAD_PL ,KSHIFT ,KIGNORE ,K(KTn,26 ),K(KTn,24 ),
217 K(KTn, 3 ),K(KTn,22 ),K(KTn, 2 ),K(KTn,14 ),K(KTn,13 ),KIGNORE ,KIGNORE ,KIGNORE ,
218 KSHIFT ,K(KTc,Kcu),KPAD_1 ,KPAD_2 ,KPAD_3 ,KCAPSLK ,KALT ,K(KTn, 0 ),
219 KALTGR ,KCTRL ,K(KTc,Kcl),K(KTc,Kcd ),K(KTc,Kcr),KPAD_0 ,KPAD_DT ,KPAD_EN ,
220 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
221 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
222 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
223};
224
225u_short alt_map[NR_KEYS]=
226{
227 K(KMt,27 ),K(KCn, 0 ),K(KCn, 1 ),K(KCn, 2 ),K(KCn, 3 ),K(KCn, 4 ),K(KCn, 5 ),K(KCn, 6 ),
228 K(KCn, 7 ),K(KCn, 8 ),K(KCn, 9 ),K(KCn,10 ),K(KCn,11 ),KIGNORE ,KSCRLLK ,KINTR ,
229 K(KMt,'`'),K(KMt,'1'),K(KMt,'2'),K(KMt,'3' ),K(KMt,'4'),K(KMt,'5'),K(KMt,'6' ),K(KMt,'7'),
230 K(KMt,'8'),K(KMt,'9'),K(KMt,'0'),K(KMt,'-' ),K(KMt,'='),K(KMt,'£'),K(KMt,127 ),K(KTf,21 ),
231 K(KTf,20 ),K(KTf,24 ),KNUMLK ,KPAD_DV ,KPAD_ML ,KPAD_HS ,K(KMt, 9 ),K(KMt,'q'),
232 K(KMt,'w'),K(KMt,'e'),K(KMt,'r'),K(KMt,'t' ),K(KMt,'y'),K(KMt,'u'),K(KMt,'i' ),K(KMt,'o'),
233 K(KMt,'p'),K(KMt,'['),K(KMt,']'),K(KMt,'\\'),K(KTf,22 ),K(KTf,23 ),K(KTf,25 ),KPADA_7 ,
234 KPADA_8 ,KPADA_9 ,KPAD_MI ,KCTRL ,K(KMt,'a'),K(KMt,'s'),K(KMt,'d' ),K(KMt,'f'),
235 K(KMt,'g'),K(KMt,'h'),K(KMt,'j'),K(KMt,'k' ),K(KMt,'l'),K(KMt,';'),K(KMt,'\''),K(KMt,13 ),
236 KPADA_4 ,KPADA_5 ,KPADA_6 ,KPAD_PL ,KSHIFT ,KIGNORE ,K(KMt,'z' ),K(KMt,'x'),
237 K(KMt,'c'),K(KMt,'v'),K(KMt,'b'),K(KMt,'n' ),K(KMt,'m'),K(KMt,','),K(KMt,'.' ),KIGNORE ,
238 KSHIFT ,K(KTc,Kcu),KPADA_1 ,KPADA_2 ,KPADA_3 ,KCAPSLK ,KALT ,K(KMt,' '),
239 KALTGR ,KCTRL ,CONS_DEC ,K(KTc,Kcd ),CONS_INC ,KPADA_0 ,KPAD_DT ,KPAD_EN ,
240 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
241 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
242 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
243};
244
245u_short ctrl_alt_map[NR_KEYS]=
246{
247 KIGNORE ,K(KCn, 0 ),K(KCn, 1 ),K(KCn, 2 ),K(KCn, 3 ),K(KCn, 4 ),K(KCn, 5 ),K(KCn, 6 ),
248 K(KCn, 7 ),K(KCn, 8 ),K(KCn, 9 ),K(KCn,10 ),K(KCn,11 ),KIGNORE ,KIGNORE ,KINTR ,
249 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
250 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,K(KTf,21 ),
251 K(KTf,20 ),K(KTf,24 ),KNUMLK ,KPAD_DV ,KPAD_ML ,KPAD_HS ,KIGNORE ,K(KMt,17 ),
252 K(KMt,23 ),K(KMt, 5 ),K(KMt,18 ),K(KMt,20 ),K(KMt,25 ),K(KMt,21 ),K(KMt, 9 ),K(KMt,15 ),
253 K(KMt,16 ),KIGNORE ,KIGNORE ,KIGNORE ,KREBOOT ,K(KTf,23 ),K(KTf,25 ),KPAD_7 ,
254 KPAD_8 ,KPAD_9 ,KPAD_MI ,KCTRL ,K(KMt, 1 ),K(KMt,19 ),K(KMt, 4 ),K(KMt, 6 ),
255 K(KMt, 7 ),K(KMt, 8 ),K(KMt,10 ),K(KMt,11 ),K(KMt,12 ),KIGNORE ,KIGNORE ,KENTER ,
256 KPAD_4 ,KPAD_5 ,KPAD_6 ,KPAD_PL ,KSHIFT ,KIGNORE ,K(KMt,26 ),K(KMt,24 ),
257 K(KMt, 3 ),K(KMt,22 ),K(KMt, 2 ),K(KMt,14 ),K(KMt,13 ),KIGNORE ,KIGNORE ,KIGNORE ,
258 KSHIFT ,K(KTc,Kcu),KPAD_1 ,KPAD_2 ,KPAD_3 ,KCAPSLK ,KALT ,KIGNORE ,
259 KALTGR ,KCTRL ,K(KTc,Kcl),K(KTc,Kcd ),K(KTc,Kcr),KPAD_0 ,KREBOOT ,KPAD_EN ,
260 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
261 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
262 KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,KIGNORE ,
263};
264
265ushort *key_maps[MAX_NR_KEYMAPS] = {
266 plain_map, shift_map, altgr_map, 0,
267 ctrl_map, shift_ctrl_map, 0, 0,
268 alt_map, 0, 0, 0,
269 ctrl_alt_map, 0
270};
271
272unsigned int keymap_count = 7;
273
274/*
275 * Philosophy: most people do not define more strings, but they who do
276 * often want quite a lot of string space. So, we statically allocate
277 * the default and allocate dynamically in chunks of 512 bytes.
278 */
279
280char func_buf[] = {
281 '\033', '[', '[', 'A', 0,
282 '\033', '[', '[', 'B', 0,
283 '\033', '[', '[', 'C', 0,
284 '\033', '[', '[', 'D', 0,
285 '\033', '[', '[', 'E', 0,
286 '\033', '[', '1', '7', '~', 0,
287 '\033', '[', '1', '8', '~', 0,
288 '\033', '[', '1', '9', '~', 0,
289 '\033', '[', '2', '0', '~', 0,
290 '\033', '[', '2', '1', '~', 0,
291 '\033', '[', '2', '3', '~', 0,
292 '\033', '[', '2', '4', '~', 0,
293 '\033', '[', '2', '5', '~', 0,
294 '\033', '[', '2', '6', '~', 0,
295 '\033', '[', '2', '8', '~', 0,
296 '\033', '[', '2', '9', '~', 0,
297 '\033', '[', '3', '1', '~', 0,
298 '\033', '[', '3', '2', '~', 0,
299 '\033', '[', '3', '3', '~', 0,
300 '\033', '[', '3', '4', '~', 0,
301 '\033', '[', '1', '~', 0,
302 '\033', '[', '2', '~', 0,
303 '\033', '[', '3', '~', 0,
304 '\033', '[', '4', '~', 0,
305 '\033', '[', '5', '~', 0,
306 '\033', '[', '6', '~', 0,
307 '\033', '[', 'M', 0,
308 '\033', '[', 'P', 0,
309};
310
311char *funcbufptr = func_buf;
312int funcbufsize = sizeof(func_buf);
313int funcbufleft = 0; /* space left */
314
315char *func_table[MAX_NR_FUNC] = {
316 func_buf + 0,
317 func_buf + 5,
318 func_buf + 10,
319 func_buf + 15,
320 func_buf + 20,
321 func_buf + 25,
322 func_buf + 31,
323 func_buf + 37,
324 func_buf + 43,
325 func_buf + 49,
326 func_buf + 55,
327 func_buf + 61,
328 func_buf + 67,
329 func_buf + 73,
330 func_buf + 79,
331 func_buf + 85,
332 func_buf + 91,
333 func_buf + 97,
334 func_buf + 103,
335 func_buf + 109,
336 func_buf + 115,
337 func_buf + 120,
338 func_buf + 125,
339 func_buf + 130,
340 func_buf + 135,
341 func_buf + 140,
342 func_buf + 145,
343 0,
344 0,
345 func_buf + 149,
346 0,
347};
348
349struct kbdiacruc accent_table[MAX_DIACR] = {
350 {'`', 'A', 0300}, {'`', 'a', 0340},
351 {'\'', 'A', 0301}, {'\'', 'a', 0341},
352 {'^', 'A', 0302}, {'^', 'a', 0342},
353 {'~', 'A', 0303}, {'~', 'a', 0343},
354 {'"', 'A', 0304}, {'"', 'a', 0344},
355 {'O', 'A', 0305}, {'o', 'a', 0345},
356 {'0', 'A', 0305}, {'0', 'a', 0345},
357 {'A', 'A', 0305}, {'a', 'a', 0345},
358 {'A', 'E', 0306}, {'a', 'e', 0346},
359 {',', 'C', 0307}, {',', 'c', 0347},
360 {'`', 'E', 0310}, {'`', 'e', 0350},
361 {'\'', 'E', 0311}, {'\'', 'e', 0351},
362 {'^', 'E', 0312}, {'^', 'e', 0352},
363 {'"', 'E', 0313}, {'"', 'e', 0353},
364 {'`', 'I', 0314}, {'`', 'i', 0354},
365 {'\'', 'I', 0315}, {'\'', 'i', 0355},
366 {'^', 'I', 0316}, {'^', 'i', 0356},
367 {'"', 'I', 0317}, {'"', 'i', 0357},
368 {'-', 'D', 0320}, {'-', 'd', 0360},
369 {'~', 'N', 0321}, {'~', 'n', 0361},
370 {'`', 'O', 0322}, {'`', 'o', 0362},
371 {'\'', 'O', 0323}, {'\'', 'o', 0363},
372 {'^', 'O', 0324}, {'^', 'o', 0364},
373 {'~', 'O', 0325}, {'~', 'o', 0365},
374 {'"', 'O', 0326}, {'"', 'o', 0366},
375 {'/', 'O', 0330}, {'/', 'o', 0370},
376 {'`', 'U', 0331}, {'`', 'u', 0371},
377 {'\'', 'U', 0332}, {'\'', 'u', 0372},
378 {'^', 'U', 0333}, {'^', 'u', 0373},
379 {'"', 'U', 0334}, {'"', 'u', 0374},
380 {'\'', 'Y', 0335}, {'\'', 'y', 0375},
381 {'T', 'H', 0336}, {'t', 'h', 0376},
382 {'s', 's', 0337}, {'"', 'y', 0377},
383 {'s', 'z', 0337}, {'i', 'j', 0377},
384};
385
386unsigned int accent_table_size = 68;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c52fca833268..bba867391a85 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -4,8 +4,6 @@
 
 menuconfig ACPI
 	bool "ACPI (Advanced Configuration and Power Interface) Support"
-	depends on !X86_NUMAQ
-	depends on !X86_VISWS
 	depends on !IA64_HP_SIM
 	depends on IA64 || X86
 	depends on PCI
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 57a43649a461..499ccc628d81 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -885,7 +885,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 	/* set the min alignment and padding */
 	blk_queue_update_dma_alignment(sdev->request_queue,
 				       ATA_DMA_PAD_SZ - 1);
-	blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1);
+	blk_queue_update_dma_pad(sdev->request_queue,
+				 ATA_DMA_PAD_SZ - 1);
 
 	/* configure draining */
 	buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 2b4b392dcbc1..87a7f1d02578 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -153,7 +153,7 @@ EXPORT_SYMBOL(set_trace_device);
  * it's not any guarantee, but it's a high _likelihood_ that
  * the match is valid).
  */
-void generate_resume_trace(void *tracedata, unsigned int user)
+void generate_resume_trace(const void *tracedata, unsigned int user)
 {
 	unsigned short lineno = *(unsigned short *)tracedata;
 	const char *file = *(const char **)(tracedata + 2);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index fdf4044d2e74..1efe162e16d7 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -40,6 +40,7 @@ static ssize_t show_##name(struct sys_device *dev, char *buf) \
 	return sprintf(buf, "%d\n", topology_##name(cpu)); \
 }
 
+#if defined(topology_thread_siblings) || defined(topology_core_siblings)
 static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
 {
 	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
@@ -54,21 +55,41 @@ static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
 	}
 	return n;
 }
+#endif
 
+#ifdef arch_provides_topology_pointers
 #define define_siblings_show_map(name) \
-static inline ssize_t show_##name(struct sys_device *dev, char *buf) \
+static ssize_t show_##name(struct sys_device *dev, char *buf) \
 { \
 	unsigned int cpu = dev->id; \
 	return show_cpumap(0, &(topology_##name(cpu)), buf); \
 }
 
 #define define_siblings_show_list(name) \
-static inline ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
+static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
 { \
 	unsigned int cpu = dev->id; \
 	return show_cpumap(1, &(topology_##name(cpu)), buf); \
 }
 
+#else
+#define define_siblings_show_map(name) \
+static ssize_t show_##name(struct sys_device *dev, char *buf) \
+{ \
+	unsigned int cpu = dev->id; \
+	cpumask_t mask = topology_##name(cpu); \
+	return show_cpumap(0, &mask, buf); \
+}
+
+#define define_siblings_show_list(name) \
+static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
+{ \
+	unsigned int cpu = dev->id; \
+	cpumask_t mask = topology_##name(cpu); \
+	return show_cpumap(1, &mask, buf); \
+}
+#endif
+
 #define define_siblings_show_func(name) \
 	define_siblings_show_map(name); define_siblings_show_list(name)
 
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index cd03473f3547..a002a381df92 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6628,15 +6628,18 @@ static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
6628 * DAC960_gam_ioctl is the ioctl function for performing RAID operations. 6628 * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
6629*/ 6629*/
6630 6630
6631static int DAC960_gam_ioctl(struct inode *inode, struct file *file, 6631static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
6632 unsigned int Request, unsigned long Argument) 6632 unsigned long Argument)
6633{ 6633{
6634 int ErrorCode = 0; 6634 long ErrorCode = 0;
6635 if (!capable(CAP_SYS_ADMIN)) return -EACCES; 6635 if (!capable(CAP_SYS_ADMIN)) return -EACCES;
6636
6637 lock_kernel();
6636 switch (Request) 6638 switch (Request)
6637 { 6639 {
6638 case DAC960_IOCTL_GET_CONTROLLER_COUNT: 6640 case DAC960_IOCTL_GET_CONTROLLER_COUNT:
6639 return DAC960_ControllerCount; 6641 ErrorCode = DAC960_ControllerCount;
6642 break;
6640 case DAC960_IOCTL_GET_CONTROLLER_INFO: 6643 case DAC960_IOCTL_GET_CONTROLLER_INFO:
6641 { 6644 {
6642 DAC960_ControllerInfo_T __user *UserSpaceControllerInfo = 6645 DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
@@ -6644,15 +6647,20 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6644 DAC960_ControllerInfo_T ControllerInfo; 6647 DAC960_ControllerInfo_T ControllerInfo;
6645 DAC960_Controller_T *Controller; 6648 DAC960_Controller_T *Controller;
6646 int ControllerNumber; 6649 int ControllerNumber;
6647 if (UserSpaceControllerInfo == NULL) return -EINVAL; 6650 if (UserSpaceControllerInfo == NULL)
6648 ErrorCode = get_user(ControllerNumber, 6651 ErrorCode = -EINVAL;
6652 else ErrorCode = get_user(ControllerNumber,
6649 &UserSpaceControllerInfo->ControllerNumber); 6653 &UserSpaceControllerInfo->ControllerNumber);
6650 if (ErrorCode != 0) return ErrorCode; 6654 if (ErrorCode != 0)
6655 break;;
6656 ErrorCode = -ENXIO;
6651 if (ControllerNumber < 0 || 6657 if (ControllerNumber < 0 ||
6652 ControllerNumber > DAC960_ControllerCount - 1) 6658 ControllerNumber > DAC960_ControllerCount - 1) {
6653 return -ENXIO; 6659 break;
6660 }
6654 Controller = DAC960_Controllers[ControllerNumber]; 6661 Controller = DAC960_Controllers[ControllerNumber];
6655 if (Controller == NULL) return -ENXIO; 6662 if (Controller == NULL)
6663 break;;
6656 memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T)); 6664 memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
6657 ControllerInfo.ControllerNumber = ControllerNumber; 6665 ControllerInfo.ControllerNumber = ControllerNumber;
6658 ControllerInfo.FirmwareType = Controller->FirmwareType; 6666 ControllerInfo.FirmwareType = Controller->FirmwareType;
@@ -6665,8 +6673,9 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6665 ControllerInfo.PCI_Address = Controller->PCI_Address; 6673 ControllerInfo.PCI_Address = Controller->PCI_Address;
6666 strcpy(ControllerInfo.ModelName, Controller->ModelName); 6674 strcpy(ControllerInfo.ModelName, Controller->ModelName);
6667 strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion); 6675 strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
6668 return (copy_to_user(UserSpaceControllerInfo, &ControllerInfo, 6676 ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
6669 sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0); 6677 sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
6678 break;
6670 } 6679 }
6671 case DAC960_IOCTL_V1_EXECUTE_COMMAND: 6680 case DAC960_IOCTL_V1_EXECUTE_COMMAND:
6672 { 6681 {
@@ -6684,30 +6693,39 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6684 int ControllerNumber, DataTransferLength; 6693 int ControllerNumber, DataTransferLength;
6685 unsigned char *DataTransferBuffer = NULL; 6694 unsigned char *DataTransferBuffer = NULL;
6686 dma_addr_t DataTransferBufferDMA; 6695 dma_addr_t DataTransferBufferDMA;
6687 if (UserSpaceUserCommand == NULL) return -EINVAL; 6696 if (UserSpaceUserCommand == NULL) {
6697 ErrorCode = -EINVAL;
6698 break;
6699 }
6688 if (copy_from_user(&UserCommand, UserSpaceUserCommand, 6700 if (copy_from_user(&UserCommand, UserSpaceUserCommand,
6689 sizeof(DAC960_V1_UserCommand_T))) { 6701 sizeof(DAC960_V1_UserCommand_T))) {
6690 ErrorCode = -EFAULT; 6702 ErrorCode = -EFAULT;
6691 goto Failure1a; 6703 break;
6692 } 6704 }
6693 ControllerNumber = UserCommand.ControllerNumber; 6705 ControllerNumber = UserCommand.ControllerNumber;
6706 ErrorCode = -ENXIO;
6694 if (ControllerNumber < 0 || 6707 if (ControllerNumber < 0 ||
6695 ControllerNumber > DAC960_ControllerCount - 1) 6708 ControllerNumber > DAC960_ControllerCount - 1)
6696 return -ENXIO; 6709 break;
6697 Controller = DAC960_Controllers[ControllerNumber]; 6710 Controller = DAC960_Controllers[ControllerNumber];
6698 if (Controller == NULL) return -ENXIO; 6711 if (Controller == NULL)
6699 if (Controller->FirmwareType != DAC960_V1_Controller) return -EINVAL; 6712 break;
6713 ErrorCode = -EINVAL;
6714 if (Controller->FirmwareType != DAC960_V1_Controller)
6715 break;
6700 CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode; 6716 CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
6701 DataTransferLength = UserCommand.DataTransferLength; 6717 DataTransferLength = UserCommand.DataTransferLength;
6702 if (CommandOpcode & 0x80) return -EINVAL; 6718 if (CommandOpcode & 0x80)
6719 break;
6703 if (CommandOpcode == DAC960_V1_DCDB) 6720 if (CommandOpcode == DAC960_V1_DCDB)
6704 { 6721 {
6705 if (copy_from_user(&DCDB, UserCommand.DCDB, 6722 if (copy_from_user(&DCDB, UserCommand.DCDB,
6706 sizeof(DAC960_V1_DCDB_T))) { 6723 sizeof(DAC960_V1_DCDB_T))) {
6707 ErrorCode = -EFAULT; 6724 ErrorCode = -EFAULT;
6708 goto Failure1a; 6725 break;
6709 } 6726 }
6710 if (DCDB.Channel >= DAC960_V1_MaxChannels) return -EINVAL; 6727 if (DCDB.Channel >= DAC960_V1_MaxChannels)
6728 break;
6711 if (!((DataTransferLength == 0 && 6729 if (!((DataTransferLength == 0 &&
6712 DCDB.Direction 6730 DCDB.Direction
6713 == DAC960_V1_DCDB_NoDataTransfer) || 6731 == DAC960_V1_DCDB_NoDataTransfer) ||
@@ -6717,38 +6735,37 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6717 (DataTransferLength < 0 && 6735 (DataTransferLength < 0 &&
6718 DCDB.Direction 6736 DCDB.Direction
6719 == DAC960_V1_DCDB_DataTransferSystemToDevice))) 6737 == DAC960_V1_DCDB_DataTransferSystemToDevice)))
6720 return -EINVAL; 6738 break;
6721 if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength) 6739 if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
6722 != abs(DataTransferLength)) 6740 != abs(DataTransferLength))
6723 return -EINVAL; 6741 break;
6724 DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice, 6742 DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
6725 sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA); 6743 sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
6726 if (DCDB_IOBUF == NULL) 6744 if (DCDB_IOBUF == NULL) {
6727 return -ENOMEM; 6745 ErrorCode = -ENOMEM;
6746 break;
6747 }
6728 } 6748 }
6749 ErrorCode = -ENOMEM;
6729 if (DataTransferLength > 0) 6750 if (DataTransferLength > 0)
6730 { 6751 {
6731 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice, 6752 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
6732 DataTransferLength, &DataTransferBufferDMA); 6753 DataTransferLength, &DataTransferBufferDMA);
6733 if (DataTransferBuffer == NULL) { 6754 if (DataTransferBuffer == NULL)
6734 ErrorCode = -ENOMEM; 6755 break;
6735 goto Failure1;
6736 }
6737 memset(DataTransferBuffer, 0, DataTransferLength); 6756 memset(DataTransferBuffer, 0, DataTransferLength);
6738 } 6757 }
6739 else if (DataTransferLength < 0) 6758 else if (DataTransferLength < 0)
6740 { 6759 {
6741 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice, 6760 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
6742 -DataTransferLength, &DataTransferBufferDMA); 6761 -DataTransferLength, &DataTransferBufferDMA);
6743 if (DataTransferBuffer == NULL) { 6762 if (DataTransferBuffer == NULL)
6744 ErrorCode = -ENOMEM; 6763 break;
6745 goto Failure1;
6746 }
6747 if (copy_from_user(DataTransferBuffer, 6764 if (copy_from_user(DataTransferBuffer,
6748 UserCommand.DataTransferBuffer, 6765 UserCommand.DataTransferBuffer,
6749 -DataTransferLength)) { 6766 -DataTransferLength)) {
6750 ErrorCode = -EFAULT; 6767 ErrorCode = -EFAULT;
6751 goto Failure1; 6768 break;
6752 } 6769 }
6753 } 6770 }
6754 if (CommandOpcode == DAC960_V1_DCDB) 6771 if (CommandOpcode == DAC960_V1_DCDB)
@@ -6825,8 +6842,7 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6825 if (DCDB_IOBUF != NULL) 6842 if (DCDB_IOBUF != NULL)
6826 pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T), 6843 pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
6827 DCDB_IOBUF, DCDB_IOBUFDMA); 6844 DCDB_IOBUF, DCDB_IOBUFDMA);
6828 Failure1a: 6845 break;
6829 return ErrorCode;
6830 } 6846 }
6831 case DAC960_IOCTL_V2_EXECUTE_COMMAND: 6847 case DAC960_IOCTL_V2_EXECUTE_COMMAND:
6832 { 6848 {
@@ -6844,32 +6860,43 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6844 dma_addr_t DataTransferBufferDMA; 6860 dma_addr_t DataTransferBufferDMA;
6845 unsigned char *RequestSenseBuffer = NULL; 6861 unsigned char *RequestSenseBuffer = NULL;
6846 dma_addr_t RequestSenseBufferDMA; 6862 dma_addr_t RequestSenseBufferDMA;
6847 if (UserSpaceUserCommand == NULL) return -EINVAL; 6863
6864 ErrorCode = -EINVAL;
6865 if (UserSpaceUserCommand == NULL)
6866 break;
6848 if (copy_from_user(&UserCommand, UserSpaceUserCommand, 6867 if (copy_from_user(&UserCommand, UserSpaceUserCommand,
6849 sizeof(DAC960_V2_UserCommand_T))) { 6868 sizeof(DAC960_V2_UserCommand_T))) {
6850 ErrorCode = -EFAULT; 6869 ErrorCode = -EFAULT;
6851 goto Failure2a; 6870 break;
6852 } 6871 }
6872 ErrorCode = -ENXIO;
6853 ControllerNumber = UserCommand.ControllerNumber; 6873 ControllerNumber = UserCommand.ControllerNumber;
6854 if (ControllerNumber < 0 || 6874 if (ControllerNumber < 0 ||
6855 ControllerNumber > DAC960_ControllerCount - 1) 6875 ControllerNumber > DAC960_ControllerCount - 1)
6856 return -ENXIO; 6876 break;
6857 Controller = DAC960_Controllers[ControllerNumber]; 6877 Controller = DAC960_Controllers[ControllerNumber];
6858 if (Controller == NULL) return -ENXIO; 6878 if (Controller == NULL)
6859 if (Controller->FirmwareType != DAC960_V2_Controller) return -EINVAL; 6879 break;
6880 if (Controller->FirmwareType != DAC960_V2_Controller){
6881 ErrorCode = -EINVAL;
6882 break;
6883 }
6860 DataTransferLength = UserCommand.DataTransferLength; 6884 DataTransferLength = UserCommand.DataTransferLength;
6885 ErrorCode = -ENOMEM;
6861 if (DataTransferLength > 0) 6886 if (DataTransferLength > 0)
6862 { 6887 {
6863 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice, 6888 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
6864 DataTransferLength, &DataTransferBufferDMA); 6889 DataTransferLength, &DataTransferBufferDMA);
6865 if (DataTransferBuffer == NULL) return -ENOMEM; 6890 if (DataTransferBuffer == NULL)
6891 break;
6866 memset(DataTransferBuffer, 0, DataTransferLength); 6892 memset(DataTransferBuffer, 0, DataTransferLength);
6867 } 6893 }
6868 else if (DataTransferLength < 0) 6894 else if (DataTransferLength < 0)
6869 { 6895 {
6870 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice, 6896 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
6871 -DataTransferLength, &DataTransferBufferDMA); 6897 -DataTransferLength, &DataTransferBufferDMA);
6872 if (DataTransferBuffer == NULL) return -ENOMEM; 6898 if (DataTransferBuffer == NULL)
6899 break;
6873 if (copy_from_user(DataTransferBuffer, 6900 if (copy_from_user(DataTransferBuffer,
6874 UserCommand.DataTransferBuffer, 6901 UserCommand.DataTransferBuffer,
6875 -DataTransferLength)) { 6902 -DataTransferLength)) {
@@ -6979,8 +7006,7 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6979 if (RequestSenseBuffer != NULL) 7006 if (RequestSenseBuffer != NULL)
6980 pci_free_consistent(Controller->PCIDevice, RequestSenseLength, 7007 pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
6981 RequestSenseBuffer, RequestSenseBufferDMA); 7008 RequestSenseBuffer, RequestSenseBufferDMA);
6982 Failure2a: 7009 break;
6983 return ErrorCode;
6984 } 7010 }
6985 case DAC960_IOCTL_V2_GET_HEALTH_STATUS: 7011 case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
6986 { 7012 {
@@ -6990,21 +7016,33 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6990 DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer; 7016 DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
6991 DAC960_Controller_T *Controller; 7017 DAC960_Controller_T *Controller;
6992 int ControllerNumber; 7018 int ControllerNumber;
6993 if (UserSpaceGetHealthStatus == NULL) return -EINVAL; 7019 if (UserSpaceGetHealthStatus == NULL) {
7020 ErrorCode = -EINVAL;
7021 break;
7022 }
6994 if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus, 7023 if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
6995 sizeof(DAC960_V2_GetHealthStatus_T))) 7024 sizeof(DAC960_V2_GetHealthStatus_T))) {
6996 return -EFAULT; 7025 ErrorCode = -EFAULT;
7026 break;
7027 }
7028 ErrorCode = -ENXIO;
6997 ControllerNumber = GetHealthStatus.ControllerNumber; 7029 ControllerNumber = GetHealthStatus.ControllerNumber;
6998 if (ControllerNumber < 0 || 7030 if (ControllerNumber < 0 ||
6999 ControllerNumber > DAC960_ControllerCount - 1) 7031 ControllerNumber > DAC960_ControllerCount - 1)
7000 return -ENXIO; 7032 break;
7001 Controller = DAC960_Controllers[ControllerNumber]; 7033 Controller = DAC960_Controllers[ControllerNumber];
7002 if (Controller == NULL) return -ENXIO; 7034 if (Controller == NULL)
7003 if (Controller->FirmwareType != DAC960_V2_Controller) return -EINVAL; 7035 break;
7036 if (Controller->FirmwareType != DAC960_V2_Controller) {
7037 ErrorCode = -EINVAL;
7038 break;
7039 }
7004 if (copy_from_user(&HealthStatusBuffer, 7040 if (copy_from_user(&HealthStatusBuffer,
7005 GetHealthStatus.HealthStatusBuffer, 7041 GetHealthStatus.HealthStatusBuffer,
7006 sizeof(DAC960_V2_HealthStatusBuffer_T))) 7042 sizeof(DAC960_V2_HealthStatusBuffer_T))) {
7007 return -EFAULT; 7043 ErrorCode = -EFAULT;
7044 break;
7045 }
7008 while (Controller->V2.HealthStatusBuffer->StatusChangeCounter 7046 while (Controller->V2.HealthStatusBuffer->StatusChangeCounter
7009 == HealthStatusBuffer.StatusChangeCounter && 7047 == HealthStatusBuffer.StatusChangeCounter &&
7010 Controller->V2.HealthStatusBuffer->NextEventSequenceNumber 7048 Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
@@ -7012,21 +7050,28 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
7012 { 7050 {
7013 interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue, 7051 interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue,
7014 DAC960_MonitoringTimerInterval); 7052 DAC960_MonitoringTimerInterval);
7015 if (signal_pending(current)) return -EINTR; 7053 if (signal_pending(current)) {
7054 ErrorCode = -EINTR;
7055 break;
7056 }
7016 } 7057 }
7017 if (copy_to_user(GetHealthStatus.HealthStatusBuffer, 7058 if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
7018 Controller->V2.HealthStatusBuffer, 7059 Controller->V2.HealthStatusBuffer,
7019 sizeof(DAC960_V2_HealthStatusBuffer_T))) 7060 sizeof(DAC960_V2_HealthStatusBuffer_T)))
7020 return -EFAULT; 7061 ErrorCode = -EFAULT;
7021 return 0; 7062 else
7063 ErrorCode = 0;
7022 } 7064 }
7065 default:
7066 ErrorCode = -ENOTTY;
7023 } 7067 }
7024 return -EINVAL; 7068 unlock_kernel();
7069 return ErrorCode;
7025} 7070}
7026 7071
7027static const struct file_operations DAC960_gam_fops = { 7072static const struct file_operations DAC960_gam_fops = {
7028 .owner = THIS_MODULE, 7073 .owner = THIS_MODULE,
7029 .ioctl = DAC960_gam_ioctl 7074 .unlocked_ioctl = DAC960_gam_ioctl
7030}; 7075};
7031 7076
7032static struct miscdevice DAC960_gam_dev = { 7077static struct miscdevice DAC960_gam_dev = {
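A note on the DAC960_gam_ioctl rework above: each per-case return becomes an assignment to ErrorCode followed by break, so the big kernel lock taken at the top of the handler can be dropped at a single point before returning, and the fops entry can move from .ioctl to .unlocked_ioctl. The following is a minimal sketch of that shape for a hypothetical misc device — the command value 0x1234 and all names are illustrative, not DAC960 code:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>

static long example_gam_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	long error;

	lock_kernel();			/* BKL pushed down from the VFS */
	switch (cmd) {
	case 0x1234:			/* hypothetical command number */
		error = 0;		/* each case sets error, then breaks */
		break;
	default:
		error = -ENOTTY;	/* unknown command */
	}
	unlock_kernel();		/* single unlock on the only exit path */
	return error;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_gam_ioctl,
};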
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index e8e60e7a2e70..d1de68a31920 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -7,6 +7,7 @@
7#include <linux/hdreg.h> 7#include <linux/hdreg.h>
8#include <linux/blkdev.h> 8#include <linux/blkdev.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/smp_lock.h>
10#include "aoe.h" 11#include "aoe.h"
11 12
12enum { 13enum {
@@ -174,12 +175,16 @@ aoechr_open(struct inode *inode, struct file *filp)
174{ 175{
175 int n, i; 176 int n, i;
176 177
178 lock_kernel();
177 n = iminor(inode); 179 n = iminor(inode);
178 filp->private_data = (void *) (unsigned long) n; 180 filp->private_data = (void *) (unsigned long) n;
179 181
180 for (i = 0; i < ARRAY_SIZE(chardevs); ++i) 182 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
181 if (chardevs[i].minor == n) 183 if (chardevs[i].minor == n) {
184 unlock_kernel();
182 return 0; 185 return 0;
186 }
187 unlock_kernel();
183 return -EINVAL; 188 return -EINVAL;
184} 189}
185 190
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 41f818be2f7e..2f1746295d06 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1003,7 +1003,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
1003 * Enough people have their dip switches set backwards to 1003 * Enough people have their dip switches set backwards to
1004 * warrant a loud message for this special case. 1004 * warrant a loud message for this special case.
1005 */ 1005 */
1006 aoemajor = be16_to_cpu(get_unaligned(&h->major)); 1006 aoemajor = get_unaligned_be16(&h->major);
1007 if (aoemajor == 0xfff) { 1007 if (aoemajor == 0xfff) {
1008 printk(KERN_ERR "aoe: Warning: shelf address is all ones. " 1008 printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
1009 "Check shelf dip switches.\n"); 1009 "Check shelf dip switches.\n");
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index ab86e23ddc69..9d92636350e5 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -162,6 +162,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
162#include <linux/pg.h> 162#include <linux/pg.h>
163#include <linux/device.h> 163#include <linux/device.h>
164#include <linux/sched.h> /* current, TASK_* */ 164#include <linux/sched.h> /* current, TASK_* */
165#include <linux/smp_lock.h>
165#include <linux/jiffies.h> 166#include <linux/jiffies.h>
166 167
167#include <asm/uaccess.h> 168#include <asm/uaccess.h>
@@ -515,12 +516,18 @@ static int pg_open(struct inode *inode, struct file *file)
515{ 516{
516 int unit = iminor(inode) & 0x7f; 517 int unit = iminor(inode) & 0x7f;
517 struct pg *dev = &devices[unit]; 518 struct pg *dev = &devices[unit];
519 int ret = 0;
518 520
519 if ((unit >= PG_UNITS) || (!dev->present)) 521 lock_kernel();
520 return -ENODEV; 522 if ((unit >= PG_UNITS) || (!dev->present)) {
523 ret = -ENODEV;
524 goto out;
525 }
521 526
522 if (test_and_set_bit(0, &dev->access)) 527 if (test_and_set_bit(0, &dev->access)) {
523 return -EBUSY; 528 ret = -EBUSY;
529 goto out;
530 }
524 531
525 if (dev->busy) { 532 if (dev->busy) {
526 pg_reset(dev); 533 pg_reset(dev);
@@ -533,12 +540,15 @@ static int pg_open(struct inode *inode, struct file *file)
533 if (dev->bufptr == NULL) { 540 if (dev->bufptr == NULL) {
534 clear_bit(0, &dev->access); 541 clear_bit(0, &dev->access);
535 printk("%s: buffer allocation failed\n", dev->name); 542 printk("%s: buffer allocation failed\n", dev->name);
536 return -ENOMEM; 543 ret = -ENOMEM;
544 goto out;
537 } 545 }
538 546
539 file->private_data = dev; 547 file->private_data = dev;
540 548
541 return 0; 549out:
550 unlock_kernel();
551 return ret;
542} 552}
543 553
544static int pg_release(struct inode *inode, struct file *file) 554static int pg_release(struct inode *inode, struct file *file)
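pg_open above gains a single out: label so the lock_kernel() taken on entry is always matched by exactly one unlock_kernel(). A minimal sketch of that open() shape — example_present and the private_data assignment are stand-ins, not pg.c code:

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>

static int example_present;		/* hypothetical "unit is there" flag */

static int example_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	lock_kernel();			/* BKL now taken by the driver itself */
	if (!example_present) {
		ret = -ENODEV;
		goto out;
	}
	file->private_data = &example_present;	/* stand-in for per-unit state */
out:
	unlock_kernel();		/* every path leaves through here */
	return ret;
}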
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 8b9549ab4a4e..5c74c3574a5a 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -146,6 +146,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
146#include <linux/mtio.h> 146#include <linux/mtio.h>
147#include <linux/device.h> 147#include <linux/device.h>
148#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */ 148#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */
149#include <linux/smp_lock.h>
149 150
150#include <asm/uaccess.h> 151#include <asm/uaccess.h>
151 152
@@ -189,8 +190,7 @@ module_param_array(drive3, int, NULL, 0);
189#define ATAPI_LOG_SENSE 0x4d 190#define ATAPI_LOG_SENSE 0x4d
190 191
191static int pt_open(struct inode *inode, struct file *file); 192static int pt_open(struct inode *inode, struct file *file);
192static int pt_ioctl(struct inode *inode, struct file *file, 193static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
193 unsigned int cmd, unsigned long arg);
194static int pt_release(struct inode *inode, struct file *file); 194static int pt_release(struct inode *inode, struct file *file);
195static ssize_t pt_read(struct file *filp, char __user *buf, 195static ssize_t pt_read(struct file *filp, char __user *buf,
196 size_t count, loff_t * ppos); 196 size_t count, loff_t * ppos);
@@ -236,7 +236,7 @@ static const struct file_operations pt_fops = {
236 .owner = THIS_MODULE, 236 .owner = THIS_MODULE,
237 .read = pt_read, 237 .read = pt_read,
238 .write = pt_write, 238 .write = pt_write,
239 .ioctl = pt_ioctl, 239 .unlocked_ioctl = pt_ioctl,
240 .open = pt_open, 240 .open = pt_open,
241 .release = pt_release, 241 .release = pt_release,
242}; 242};
@@ -650,8 +650,11 @@ static int pt_open(struct inode *inode, struct file *file)
650 struct pt_unit *tape = pt + unit; 650 struct pt_unit *tape = pt + unit;
651 int err; 651 int err;
652 652
653 if (unit >= PT_UNITS || (!tape->present)) 653 lock_kernel();
654 if (unit >= PT_UNITS || (!tape->present)) {
655 unlock_kernel();
654 return -ENODEV; 656 return -ENODEV;
657 }
655 658
656 err = -EBUSY; 659 err = -EBUSY;
657 if (!atomic_dec_and_test(&tape->available)) 660 if (!atomic_dec_and_test(&tape->available))
@@ -678,15 +681,16 @@ static int pt_open(struct inode *inode, struct file *file)
678 } 681 }
679 682
680 file->private_data = tape; 683 file->private_data = tape;
684 unlock_kernel();
681 return 0; 685 return 0;
682 686
683out: 687out:
684 atomic_inc(&tape->available); 688 atomic_inc(&tape->available);
689 unlock_kernel();
685 return err; 690 return err;
686} 691}
687 692
688static int pt_ioctl(struct inode *inode, struct file *file, 693static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
689 unsigned int cmd, unsigned long arg)
690{ 694{
691 struct pt_unit *tape = file->private_data; 695 struct pt_unit *tape = file->private_data;
692 struct mtop __user *p = (void __user *)arg; 696 struct mtop __user *p = (void __user *)arg;
@@ -700,23 +704,26 @@ static int pt_ioctl(struct inode *inode, struct file *file,
700 switch (mtop.mt_op) { 704 switch (mtop.mt_op) {
701 705
702 case MTREW: 706 case MTREW:
707 lock_kernel();
703 pt_rewind(tape); 708 pt_rewind(tape);
709 unlock_kernel();
704 return 0; 710 return 0;
705 711
706 case MTWEOF: 712 case MTWEOF:
713 lock_kernel();
707 pt_write_fm(tape); 714 pt_write_fm(tape);
715 unlock_kernel();
708 return 0; 716 return 0;
709 717
710 default: 718 default:
711 printk("%s: Unimplemented mt_op %d\n", tape->name, 719 /* FIXME: rate limit ?? */
720 printk(KERN_DEBUG "%s: Unimplemented mt_op %d\n", tape->name,
712 mtop.mt_op); 721 mtop.mt_op);
713 return -EINVAL; 722 return -EINVAL;
714 } 723 }
715 724
716 default: 725 default:
717 printk("%s: Unimplemented ioctl 0x%x\n", tape->name, cmd); 726 return -ENOTTY;
718 return -EINVAL;
719
720 } 727 }
721} 728}
722 729
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3ba1df93e9e3..45bee918c46a 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -49,6 +49,7 @@
49#include <linux/types.h> 49#include <linux/types.h>
50#include <linux/kernel.h> 50#include <linux/kernel.h>
51#include <linux/kthread.h> 51#include <linux/kthread.h>
52#include <linux/smp_lock.h>
52#include <linux/errno.h> 53#include <linux/errno.h>
53#include <linux/spinlock.h> 54#include <linux/spinlock.h>
54#include <linux/file.h> 55#include <linux/file.h>
@@ -2079,7 +2080,6 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
2079 unsigned char buf[64]; 2080 unsigned char buf[64];
2080 int ret; 2081 int ret;
2081 2082
2082 memset(buf, 0, sizeof(buf));
2083 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); 2083 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
2084 cgc.sense = &sense; 2084 cgc.sense = &sense;
2085 cgc.buflen = pd->mode_offset + 12; 2085 cgc.buflen = pd->mode_offset + 12;
@@ -2126,7 +2126,6 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
2126 unsigned char *cap_buf; 2126 unsigned char *cap_buf;
2127 int ret, offset; 2127 int ret, offset;
2128 2128
2129 memset(buf, 0, sizeof(buf));
2130 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; 2129 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
2131 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN); 2130 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
2132 cgc.sense = &sense; 2131 cgc.sense = &sense;
@@ -2633,11 +2632,12 @@ end_io:
2633 2632
2634 2633
2635 2634
2636static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec) 2635static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2636 struct bio_vec *bvec)
2637{ 2637{
2638 struct pktcdvd_device *pd = q->queuedata; 2638 struct pktcdvd_device *pd = q->queuedata;
2639 sector_t zone = ZONE(bio->bi_sector, pd); 2639 sector_t zone = ZONE(bmd->bi_sector, pd);
2640 int used = ((bio->bi_sector - zone) << 9) + bio->bi_size; 2640 int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
2641 int remaining = (pd->settings.size << 9) - used; 2641 int remaining = (pd->settings.size << 9) - used;
2642 int remaining2; 2642 int remaining2;
2643 2643
@@ -2645,7 +2645,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_v
2645 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet 2645 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
2646 * boundary, pkt_make_request() will split the bio. 2646 * boundary, pkt_make_request() will split the bio.
2647 */ 2647 */
2648 remaining2 = PAGE_SIZE - bio->bi_size; 2648 remaining2 = PAGE_SIZE - bmd->bi_size;
2649 remaining = max(remaining, remaining2); 2649 remaining = max(remaining, remaining2);
2650 2650
2651 BUG_ON(remaining < 0); 2651 BUG_ON(remaining < 0);
@@ -2796,9 +2796,14 @@ out_mem:
2796 return ret; 2796 return ret;
2797} 2797}
2798 2798
2799static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 2799static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2800{ 2800{
2801 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; 2801 struct inode *inode = file->f_path.dentry->d_inode;
2802 struct pktcdvd_device *pd;
2803 long ret;
2804
2805 lock_kernel();
2806 pd = inode->i_bdev->bd_disk->private_data;
2802 2807
2803 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode)); 2808 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
2804 2809
@@ -2811,7 +2816,8 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
2811 case CDROM_LAST_WRITTEN: 2816 case CDROM_LAST_WRITTEN:
2812 case CDROM_SEND_PACKET: 2817 case CDROM_SEND_PACKET:
2813 case SCSI_IOCTL_SEND_COMMAND: 2818 case SCSI_IOCTL_SEND_COMMAND:
2814 return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); 2819 ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
2820 break;
2815 2821
2816 case CDROMEJECT: 2822 case CDROMEJECT:
2817 /* 2823 /*
@@ -2820,14 +2826,15 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
2820 */ 2826 */
2821 if (pd->refcnt == 1) 2827 if (pd->refcnt == 1)
2822 pkt_lock_door(pd, 0); 2828 pkt_lock_door(pd, 0);
2823 return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); 2829 ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
2830 break;
2824 2831
2825 default: 2832 default:
2826 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); 2833 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
2827 return -ENOTTY; 2834 ret = -ENOTTY;
2828 } 2835 }
2829 2836 unlock_kernel();
2830 return 0; 2837 return ret;
2831} 2838}
2832 2839
2833static int pkt_media_changed(struct gendisk *disk) 2840static int pkt_media_changed(struct gendisk *disk)
@@ -2849,7 +2856,7 @@ static struct block_device_operations pktcdvd_ops = {
2849 .owner = THIS_MODULE, 2856 .owner = THIS_MODULE,
2850 .open = pkt_open, 2857 .open = pkt_open,
2851 .release = pkt_close, 2858 .release = pkt_close,
2852 .ioctl = pkt_ioctl, 2859 .unlocked_ioctl = pkt_ioctl,
2853 .media_changed = pkt_media_changed, 2860 .media_changed = pkt_media_changed,
2854}; 2861};
2855 2862
@@ -3014,7 +3021,8 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
3014 mutex_unlock(&ctl_mutex); 3021 mutex_unlock(&ctl_mutex);
3015} 3022}
3016 3023
3017static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 3024static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
3025 unsigned long arg)
3018{ 3026{
3019 void __user *argp = (void __user *)arg; 3027 void __user *argp = (void __user *)arg;
3020 struct pkt_ctrl_command ctrl_cmd; 3028 struct pkt_ctrl_command ctrl_cmd;
@@ -3031,16 +3039,22 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
3031 case PKT_CTRL_CMD_SETUP: 3039 case PKT_CTRL_CMD_SETUP:
3032 if (!capable(CAP_SYS_ADMIN)) 3040 if (!capable(CAP_SYS_ADMIN))
3033 return -EPERM; 3041 return -EPERM;
3042 lock_kernel();
3034 ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev); 3043 ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
3035 ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev); 3044 ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
3045 unlock_kernel();
3036 break; 3046 break;
3037 case PKT_CTRL_CMD_TEARDOWN: 3047 case PKT_CTRL_CMD_TEARDOWN:
3038 if (!capable(CAP_SYS_ADMIN)) 3048 if (!capable(CAP_SYS_ADMIN))
3039 return -EPERM; 3049 return -EPERM;
3050 lock_kernel();
3040 ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev)); 3051 ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
3052 unlock_kernel();
3041 break; 3053 break;
3042 case PKT_CTRL_CMD_STATUS: 3054 case PKT_CTRL_CMD_STATUS:
3055 lock_kernel();
3043 pkt_get_status(&ctrl_cmd); 3056 pkt_get_status(&ctrl_cmd);
3057 unlock_kernel();
3044 break; 3058 break;
3045 default: 3059 default:
3046 return -ENOTTY; 3060 return -ENOTTY;
@@ -3053,7 +3067,7 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
3053 3067
3054 3068
3055static const struct file_operations pkt_ctl_fops = { 3069static const struct file_operations pkt_ctl_fops = {
3056 .ioctl = pkt_ctl_ioctl, 3070 .unlocked_ioctl = pkt_ctl_ioctl,
3057 .owner = THIS_MODULE, 3071 .owner = THIS_MODULE,
3058}; 3072};
3059 3073
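Both pkt_ioctl and pkt_ctl_ioctl above switch from the old .ioctl prototype to .unlocked_ioctl, which is no longer handed the inode; where the handler still needs it, the patch recovers it from the struct file. A sketch of that idiom under the 2.6.26-era struct file layout (the handler body is illustrative):

#include <linux/fs.h>
#include <linux/errno.h>

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;

	(void)inode;		/* e.g. inode->i_bdev->bd_disk->private_data */
	return -ENOTTY;
}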
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index f2fff5799ddf..9ae05c584234 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -38,6 +38,7 @@
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/blkdev.h> 39#include <linux/blkdev.h>
40#include <linux/hdreg.h> 40#include <linux/hdreg.h>
41#include <linux/cdrom.h>
41#include <linux/module.h> 42#include <linux/module.h>
42 43
43#include <xen/xenbus.h> 44#include <xen/xenbus.h>
@@ -153,6 +154,40 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
153 return 0; 154 return 0;
154} 155}
155 156
157int blkif_ioctl(struct inode *inode, struct file *filep,
158 unsigned command, unsigned long argument)
159{
160 struct blkfront_info *info =
161 inode->i_bdev->bd_disk->private_data;
162 int i;
163
164 dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
165 command, (long)argument);
166
167 switch (command) {
168 case CDROMMULTISESSION:
169 dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
170 for (i = 0; i < sizeof(struct cdrom_multisession); i++)
171 if (put_user(0, (char __user *)(argument + i)))
172 return -EFAULT;
173 return 0;
174
175 case CDROM_GET_CAPABILITY: {
176 struct gendisk *gd = info->gd;
177 if (gd->flags & GENHD_FL_CD)
178 return 0;
179 return -EINVAL;
180 }
181
182 default:
183 /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
184 command);*/
185 return -EINVAL; /* same return as native Linux */
186 }
187
188 return 0;
189}
190
156/* 191/*
157 * blkif_queue_request 192 * blkif_queue_request
158 * 193 *
@@ -324,6 +359,9 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
324 /* Make sure buffer addresses are sector-aligned. */ 359 /* Make sure buffer addresses are sector-aligned. */
325 blk_queue_dma_alignment(rq, 511); 360 blk_queue_dma_alignment(rq, 511);
326 361
362 /* Make sure we don't use bounce buffers. */
363 blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
364
327 gd->queue = rq; 365 gd->queue = rq;
328 366
329 return 0; 367 return 0;
@@ -546,7 +584,7 @@ static int setup_blkring(struct xenbus_device *dev,
546 584
547 info->ring_ref = GRANT_INVALID_REF; 585 info->ring_ref = GRANT_INVALID_REF;
548 586
549 sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL); 587 sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
550 if (!sring) { 588 if (!sring) {
551 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); 589 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
552 return -ENOMEM; 590 return -ENOMEM;
@@ -703,7 +741,8 @@ static int blkif_recover(struct blkfront_info *info)
703 int j; 741 int j;
704 742
705 /* Stage 1: Make a safe copy of the shadow state. */ 743 /* Stage 1: Make a safe copy of the shadow state. */
706 copy = kmalloc(sizeof(info->shadow), GFP_KERNEL); 744 copy = kmalloc(sizeof(info->shadow),
745 GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
707 if (!copy) 746 if (!copy)
708 return -ENOMEM; 747 return -ENOMEM;
709 memcpy(copy, info->shadow, sizeof(info->shadow)); 748 memcpy(copy, info->shadow, sizeof(info->shadow));
@@ -959,7 +998,7 @@ static int blkif_release(struct inode *inode, struct file *filep)
959 struct xenbus_device *dev = info->xbdev; 998 struct xenbus_device *dev = info->xbdev;
960 enum xenbus_state state = xenbus_read_driver_state(dev->otherend); 999 enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
961 1000
962 if (state == XenbusStateClosing) 1001 if (state == XenbusStateClosing && info->is_ready)
963 blkfront_closing(dev); 1002 blkfront_closing(dev);
964 } 1003 }
965 return 0; 1004 return 0;
@@ -971,6 +1010,7 @@ static struct block_device_operations xlvbd_block_fops =
971 .open = blkif_open, 1010 .open = blkif_open,
972 .release = blkif_release, 1011 .release = blkif_release,
973 .getgeo = blkif_getgeo, 1012 .getgeo = blkif_getgeo,
1013 .ioctl = blkif_ioctl,
974}; 1014};
975 1015
976 1016
@@ -1006,7 +1046,7 @@ static int __init xlblk_init(void)
1006module_init(xlblk_init); 1046module_init(xlblk_init);
1007 1047
1008 1048
1009static void xlblk_exit(void) 1049static void __exit xlblk_exit(void)
1010{ 1050{
1011 return xenbus_unregister_driver(&blkfront); 1051 return xenbus_unregister_driver(&blkfront);
1012} 1052}
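Two xen-blkfront allocations above move from GFP_KERNEL to GFP_NOIO (plus __GFP_HIGH), presumably because setup_blkring() and blkif_recover() run while the block device is being connected or recovered, where reclaiming memory by issuing new block I/O could deadlock. The allocation keeps its usual shape, only the flags change:

	/* before: copy = kmalloc(sizeof(info->shadow), GFP_KERNEL); */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;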
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 0638730a4a19..d97700aa54a9 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -28,6 +28,7 @@
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/smp_lock.h>
31#include <linux/types.h> 32#include <linux/types.h>
32#include <linux/errno.h> 33#include <linux/errno.h>
33#include <linux/sched.h> 34#include <linux/sched.h>
@@ -263,9 +264,11 @@ static int vhci_open(struct inode *inode, struct file *file)
263 skb_queue_head_init(&data->readq); 264 skb_queue_head_init(&data->readq);
264 init_waitqueue_head(&data->read_wait); 265 init_waitqueue_head(&data->read_wait);
265 266
267 lock_kernel();
266 hdev = hci_alloc_dev(); 268 hdev = hci_alloc_dev();
267 if (!hdev) { 269 if (!hdev) {
268 kfree(data); 270 kfree(data);
271 unlock_kernel();
269 return -ENOMEM; 272 return -ENOMEM;
270 } 273 }
271 274
@@ -286,10 +289,12 @@ static int vhci_open(struct inode *inode, struct file *file)
286 BT_ERR("Can't register HCI device"); 289 BT_ERR("Can't register HCI device");
287 kfree(data); 290 kfree(data);
288 hci_free_dev(hdev); 291 hci_free_dev(hdev);
292 unlock_kernel();
289 return -EBUSY; 293 return -EBUSY;
290 } 294 }
291 295
292 file->private_data = data; 296 file->private_data = data;
297 unlock_kernel();
293 298
294 return nonseekable_open(inode, file); 299 return nonseekable_open(inode, file);
295} 300}
@@ -313,18 +318,21 @@ static int vhci_release(struct inode *inode, struct file *file)
313static int vhci_fasync(int fd, struct file *file, int on) 318static int vhci_fasync(int fd, struct file *file, int on)
314{ 319{
315 struct vhci_data *data = file->private_data; 320 struct vhci_data *data = file->private_data;
316 int err; 321 int err = 0;
317 322
323 lock_kernel();
318 err = fasync_helper(fd, file, on, &data->fasync); 324 err = fasync_helper(fd, file, on, &data->fasync);
319 if (err < 0) 325 if (err < 0)
320 return err; 326 goto out;
321 327
322 if (on) 328 if (on)
323 data->flags |= VHCI_FASYNC; 329 data->flags |= VHCI_FASYNC;
324 else 330 else
325 data->flags &= ~VHCI_FASYNC; 331 data->flags &= ~VHCI_FASYNC;
326 332
327 return 0; 333out:
334 unlock_kernel();
335 return err;
328} 336}
329 337
330static const struct file_operations vhci_fops = { 338static const struct file_operations vhci_fops = {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 69f26eb6415b..a5da35632651 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -461,37 +461,27 @@ int cdrom_get_media_event(struct cdrom_device_info *cdi,
461 struct media_event_desc *med) 461 struct media_event_desc *med)
462{ 462{
463 struct packet_command cgc; 463 struct packet_command cgc;
464 unsigned char *buffer; 464 unsigned char buffer[8];
465 struct event_header *eh; 465 struct event_header *eh = (struct event_header *) buffer;
466 int ret = 1;
467
468 buffer = kmalloc(8, GFP_KERNEL);
469 if (!buffer)
470 return -ENOMEM;
471 466
472 eh = (struct event_header *)buffer; 467 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
473
474 init_cdrom_command(&cgc, buffer, 8, CGC_DATA_READ);
475 cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION; 468 cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION;
476 cgc.cmd[1] = 1; /* IMMED */ 469 cgc.cmd[1] = 1; /* IMMED */
477 cgc.cmd[4] = 1 << 4; /* media event */ 470 cgc.cmd[4] = 1 << 4; /* media event */
478 cgc.cmd[8] = 8; 471 cgc.cmd[8] = sizeof(buffer);
479 cgc.quiet = 1; 472 cgc.quiet = 1;
480 473
481 if (cdi->ops->generic_packet(cdi, &cgc)) 474 if (cdi->ops->generic_packet(cdi, &cgc))
482 goto err; 475 return 1;
483 476
484 if (be16_to_cpu(eh->data_len) < sizeof(*med)) 477 if (be16_to_cpu(eh->data_len) < sizeof(*med))
485 goto err; 478 return 1;
486 479
487 if (eh->nea || eh->notification_class != 0x4) 480 if (eh->nea || eh->notification_class != 0x4)
488 goto err; 481 return 1;
489 482
490 memcpy(med, buffer + sizeof(*eh), sizeof(*med)); 483 memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
491 ret = 0; 484 return 0;
492err:
493 kfree(buffer);
494 return ret;
495} 485}
496 486
497/* 487/*
@@ -501,82 +491,68 @@ err:
501static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi) 491static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi)
502{ 492{
503 struct packet_command cgc; 493 struct packet_command cgc;
504 char *buffer; 494 char buffer[16];
505 int ret = 1;
506
507 buffer = kmalloc(16, GFP_KERNEL);
508 if (!buffer)
509 return -ENOMEM;
510 495
511 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); 496 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
512 497
513 cgc.timeout = HZ; 498 cgc.timeout = HZ;
514 cgc.quiet = 1; 499 cgc.quiet = 1;
515 500
516 if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) { 501 if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) {
517 cdi->mrw_mode_page = MRW_MODE_PC; 502 cdi->mrw_mode_page = MRW_MODE_PC;
518 ret = 0; 503 return 0;
519 } else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) { 504 } else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) {
520 cdi->mrw_mode_page = MRW_MODE_PC_PRE1; 505 cdi->mrw_mode_page = MRW_MODE_PC_PRE1;
521 ret = 0; 506 return 0;
522 } 507 }
523 kfree(buffer); 508
524 return ret; 509 return 1;
525} 510}
526 511
527static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write) 512static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write)
528{ 513{
529 struct packet_command cgc; 514 struct packet_command cgc;
530 struct mrw_feature_desc *mfd; 515 struct mrw_feature_desc *mfd;
531 unsigned char *buffer; 516 unsigned char buffer[16];
532 int ret; 517 int ret;
533 518
534 *write = 0; 519 *write = 0;
535 buffer = kmalloc(16, GFP_KERNEL);
536 if (!buffer)
537 return -ENOMEM;
538 520
539 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); 521 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
540 522
541 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; 523 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
542 cgc.cmd[3] = CDF_MRW; 524 cgc.cmd[3] = CDF_MRW;
543 cgc.cmd[8] = 16; 525 cgc.cmd[8] = sizeof(buffer);
544 cgc.quiet = 1; 526 cgc.quiet = 1;
545 527
546 if ((ret = cdi->ops->generic_packet(cdi, &cgc))) 528 if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
547 goto err; 529 return ret;
548 530
549 mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)]; 531 mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)];
550 if (be16_to_cpu(mfd->feature_code) != CDF_MRW) { 532 if (be16_to_cpu(mfd->feature_code) != CDF_MRW)
551 ret = 1; 533 return 1;
552 goto err;
553 }
554 *write = mfd->write; 534 *write = mfd->write;
555 535
556 if ((ret = cdrom_mrw_probe_pc(cdi))) { 536 if ((ret = cdrom_mrw_probe_pc(cdi))) {
557 *write = 0; 537 *write = 0;
538 return ret;
558 } 539 }
559err: 540
560 kfree(buffer); 541 return 0;
561 return ret;
562} 542}
563 543
564static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont) 544static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
565{ 545{
566 struct packet_command cgc; 546 struct packet_command cgc;
567 unsigned char *buffer; 547 unsigned char buffer[12];
568 int ret; 548 int ret;
569 549
570 printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : ""); 550 printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : "");
571 551
572 buffer = kmalloc(12, GFP_KERNEL);
573 if (!buffer)
574 return -ENOMEM;
575
576 /* 552 /*
577 * FmtData bit set (bit 4), format type is 1 553 * FmtData bit set (bit 4), format type is 1
578 */ 554 */
579 init_cdrom_command(&cgc, buffer, 12, CGC_DATA_WRITE); 555 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_WRITE);
580 cgc.cmd[0] = GPCMD_FORMAT_UNIT; 556 cgc.cmd[0] = GPCMD_FORMAT_UNIT;
581 cgc.cmd[1] = (1 << 4) | 1; 557 cgc.cmd[1] = (1 << 4) | 1;
582 558
@@ -603,7 +579,6 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
603 if (ret) 579 if (ret)
604 printk(KERN_INFO "cdrom: bgformat failed\n"); 580 printk(KERN_INFO "cdrom: bgformat failed\n");
605 581
606 kfree(buffer);
607 return ret; 582 return ret;
608} 583}
609 584
@@ -663,17 +638,16 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
663{ 638{
664 struct packet_command cgc; 639 struct packet_command cgc;
665 struct mode_page_header *mph; 640 struct mode_page_header *mph;
666 char *buffer; 641 char buffer[16];
667 int ret, offset, size; 642 int ret, offset, size;
668 643
669 buffer = kmalloc(16, GFP_KERNEL); 644 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
670 if (!buffer)
671 return -ENOMEM;
672 645
673 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); 646 cgc.buffer = buffer;
647 cgc.buflen = sizeof(buffer);
674 648
675 if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0))) 649 if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0)))
676 goto err; 650 return ret;
677 651
678 mph = (struct mode_page_header *) buffer; 652 mph = (struct mode_page_header *) buffer;
679 offset = be16_to_cpu(mph->desc_length); 653 offset = be16_to_cpu(mph->desc_length);
@@ -683,70 +657,55 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
683 cgc.buflen = size; 657 cgc.buflen = size;
684 658
685 if ((ret = cdrom_mode_select(cdi, &cgc))) 659 if ((ret = cdrom_mode_select(cdi, &cgc)))
686 goto err; 660 return ret;
687 661
688 printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]); 662 printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]);
689 ret = 0; 663 return 0;
690err:
691 kfree(buffer);
692 return ret;
693} 664}
694 665
695static int cdrom_get_random_writable(struct cdrom_device_info *cdi, 666static int cdrom_get_random_writable(struct cdrom_device_info *cdi,
696 struct rwrt_feature_desc *rfd) 667 struct rwrt_feature_desc *rfd)
697{ 668{
698 struct packet_command cgc; 669 struct packet_command cgc;
699 char *buffer; 670 char buffer[24];
700 int ret; 671 int ret;
701 672
702 buffer = kmalloc(24, GFP_KERNEL); 673 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
703 if (!buffer)
704 return -ENOMEM;
705
706 init_cdrom_command(&cgc, buffer, 24, CGC_DATA_READ);
707 674
708 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* often 0x46 */ 675 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* often 0x46 */
709 cgc.cmd[3] = CDF_RWRT; /* often 0x0020 */ 676 cgc.cmd[3] = CDF_RWRT; /* often 0x0020 */
710 cgc.cmd[8] = 24; /* often 0x18 */ 677 cgc.cmd[8] = sizeof(buffer); /* often 0x18 */
711 cgc.quiet = 1; 678 cgc.quiet = 1;
712 679
713 if ((ret = cdi->ops->generic_packet(cdi, &cgc))) 680 if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
714 goto err; 681 return ret;
715 682
716 memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd)); 683 memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd));
717 ret = 0; 684 return 0;
718err:
719 kfree(buffer);
720 return ret;
721} 685}
722 686
723static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi) 687static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi)
724{ 688{
725 struct packet_command cgc; 689 struct packet_command cgc;
726 char *buffer; 690 char buffer[16];
727 __be16 *feature_code; 691 __be16 *feature_code;
728 int ret; 692 int ret;
729 693
730 buffer = kmalloc(16, GFP_KERNEL); 694 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
731 if (!buffer)
732 return -ENOMEM;
733
734 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
735 695
736 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; 696 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
737 cgc.cmd[3] = CDF_HWDM; 697 cgc.cmd[3] = CDF_HWDM;
738 cgc.cmd[8] = 16; 698 cgc.cmd[8] = sizeof(buffer);
739 cgc.quiet = 1; 699 cgc.quiet = 1;
740 700
741 if ((ret = cdi->ops->generic_packet(cdi, &cgc))) 701 if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
742 goto err; 702 return ret;
743 703
744 feature_code = (__be16 *) &buffer[sizeof(struct feature_header)]; 704 feature_code = (__be16 *) &buffer[sizeof(struct feature_header)];
745 if (be16_to_cpu(*feature_code) == CDF_HWDM) 705 if (be16_to_cpu(*feature_code) == CDF_HWDM)
746 ret = 0; 706 return 0;
747err: 707
748 kfree(buffer); 708 return 1;
749 return ret;
750} 709}
751 710
752 711
@@ -837,14 +796,10 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
837static int mo_open_write(struct cdrom_device_info *cdi) 796static int mo_open_write(struct cdrom_device_info *cdi)
838{ 797{
839 struct packet_command cgc; 798 struct packet_command cgc;
840 char *buffer; 799 char buffer[255];
841 int ret; 800 int ret;
842 801
843 buffer = kmalloc(255, GFP_KERNEL); 802 init_cdrom_command(&cgc, &buffer, 4, CGC_DATA_READ);
844 if (!buffer)
845 return -ENOMEM;
846
847 init_cdrom_command(&cgc, buffer, 4, CGC_DATA_READ);
848 cgc.quiet = 1; 803 cgc.quiet = 1;
849 804
850 /* 805 /*
@@ -861,15 +816,10 @@ static int mo_open_write(struct cdrom_device_info *cdi)
861 } 816 }
862 817
863 /* drive gave us no info, let the user go ahead */ 818 /* drive gave us no info, let the user go ahead */
864 if (ret) { 819 if (ret)
865 ret = 0; 820 return 0;
866 goto err;
867 }
868 821
869 ret = buffer[3] & 0x80; 822 return buffer[3] & 0x80;
870err:
871 kfree(buffer);
872 return ret;
873} 823}
874 824
875static int cdrom_ram_open_write(struct cdrom_device_info *cdi) 825static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
@@ -892,19 +842,15 @@ static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
892static void cdrom_mmc3_profile(struct cdrom_device_info *cdi) 842static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
893{ 843{
894 struct packet_command cgc; 844 struct packet_command cgc;
895 char *buffer; 845 char buffer[32];
896 int ret, mmc3_profile; 846 int ret, mmc3_profile;
897 847
898 buffer = kmalloc(32, GFP_KERNEL); 848 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
899 if (!buffer)
900 return;
901
902 init_cdrom_command(&cgc, buffer, 32, CGC_DATA_READ);
903 849
904 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; 850 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
905 cgc.cmd[1] = 0; 851 cgc.cmd[1] = 0;
906 cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */ 852 cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */
907 cgc.cmd[8] = 32; /* Allocation Length */ 853 cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
908 cgc.quiet = 1; 854 cgc.quiet = 1;
909 855
910 if ((ret = cdi->ops->generic_packet(cdi, &cgc))) 856 if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
@@ -913,7 +859,6 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
913 mmc3_profile = (buffer[6] << 8) | buffer[7]; 859 mmc3_profile = (buffer[6] << 8) | buffer[7];
914 860
915 cdi->mmc3_profile = mmc3_profile; 861 cdi->mmc3_profile = mmc3_profile;
916 kfree(buffer);
917} 862}
918 863
919static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi) 864static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi)
@@ -1628,15 +1573,12 @@ static void setup_send_key(struct packet_command *cgc, unsigned agid, unsigned t
1628static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) 1573static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1629{ 1574{
1630 int ret; 1575 int ret;
1631 u_char *buf; 1576 u_char buf[20];
1632 struct packet_command cgc; 1577 struct packet_command cgc;
1633 struct cdrom_device_ops *cdo = cdi->ops; 1578 struct cdrom_device_ops *cdo = cdi->ops;
1634 rpc_state_t *rpc_state; 1579 rpc_state_t rpc_state;
1635
1636 buf = kzalloc(20, GFP_KERNEL);
1637 if (!buf)
1638 return -ENOMEM;
1639 1580
1581 memset(buf, 0, sizeof(buf));
1640 init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ); 1582 init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ);
1641 1583
1642 switch (ai->type) { 1584 switch (ai->type) {
@@ -1647,7 +1589,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1647 setup_report_key(&cgc, ai->lsa.agid, 0); 1589 setup_report_key(&cgc, ai->lsa.agid, 0);
1648 1590
1649 if ((ret = cdo->generic_packet(cdi, &cgc))) 1591 if ((ret = cdo->generic_packet(cdi, &cgc)))
1650 goto err; 1592 return ret;
1651 1593
1652 ai->lsa.agid = buf[7] >> 6; 1594 ai->lsa.agid = buf[7] >> 6;
1653 /* Returning data, let host change state */ 1595 /* Returning data, let host change state */
@@ -1658,7 +1600,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1658 setup_report_key(&cgc, ai->lsk.agid, 2); 1600 setup_report_key(&cgc, ai->lsk.agid, 2);
1659 1601
1660 if ((ret = cdo->generic_packet(cdi, &cgc))) 1602 if ((ret = cdo->generic_packet(cdi, &cgc)))
1661 goto err; 1603 return ret;
1662 1604
1663 copy_key(ai->lsk.key, &buf[4]); 1605 copy_key(ai->lsk.key, &buf[4]);
1664 /* Returning data, let host change state */ 1606 /* Returning data, let host change state */
@@ -1669,7 +1611,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1669 setup_report_key(&cgc, ai->lsc.agid, 1); 1611 setup_report_key(&cgc, ai->lsc.agid, 1);
1670 1612
1671 if ((ret = cdo->generic_packet(cdi, &cgc))) 1613 if ((ret = cdo->generic_packet(cdi, &cgc)))
1672 goto err; 1614 return ret;
1673 1615
1674 copy_chal(ai->lsc.chal, &buf[4]); 1616 copy_chal(ai->lsc.chal, &buf[4]);
1675 /* Returning data, let host change state */ 1617 /* Returning data, let host change state */
@@ -1686,7 +1628,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1686 cgc.cmd[2] = ai->lstk.lba >> 24; 1628 cgc.cmd[2] = ai->lstk.lba >> 24;
1687 1629
1688 if ((ret = cdo->generic_packet(cdi, &cgc))) 1630 if ((ret = cdo->generic_packet(cdi, &cgc)))
1689 goto err; 1631 return ret;
1690 1632
1691 ai->lstk.cpm = (buf[4] >> 7) & 1; 1633 ai->lstk.cpm = (buf[4] >> 7) & 1;
1692 ai->lstk.cp_sec = (buf[4] >> 6) & 1; 1634 ai->lstk.cp_sec = (buf[4] >> 6) & 1;
@@ -1700,7 +1642,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1700 setup_report_key(&cgc, ai->lsasf.agid, 5); 1642 setup_report_key(&cgc, ai->lsasf.agid, 5);
1701 1643
1702 if ((ret = cdo->generic_packet(cdi, &cgc))) 1644 if ((ret = cdo->generic_packet(cdi, &cgc)))
1703 goto err; 1645 return ret;
1704 1646
1705 ai->lsasf.asf = buf[7] & 1; 1647 ai->lsasf.asf = buf[7] & 1;
1706 break; 1648 break;
@@ -1713,7 +1655,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1713 copy_chal(&buf[4], ai->hsc.chal); 1655 copy_chal(&buf[4], ai->hsc.chal);
1714 1656
1715 if ((ret = cdo->generic_packet(cdi, &cgc))) 1657 if ((ret = cdo->generic_packet(cdi, &cgc)))
1716 goto err; 1658 return ret;
1717 1659
1718 ai->type = DVD_LU_SEND_KEY1; 1660 ai->type = DVD_LU_SEND_KEY1;
1719 break; 1661 break;
@@ -1726,7 +1668,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1726 1668
1727 if ((ret = cdo->generic_packet(cdi, &cgc))) { 1669 if ((ret = cdo->generic_packet(cdi, &cgc))) {
1728 ai->type = DVD_AUTH_FAILURE; 1670 ai->type = DVD_AUTH_FAILURE;
1729 goto err; 1671 return ret;
1730 } 1672 }
1731 ai->type = DVD_AUTH_ESTABLISHED; 1673 ai->type = DVD_AUTH_ESTABLISHED;
1732 break; 1674 break;
@@ -1737,23 +1679,24 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1737 cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n"); 1679 cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n");
1738 setup_report_key(&cgc, ai->lsa.agid, 0x3f); 1680 setup_report_key(&cgc, ai->lsa.agid, 0x3f);
1739 if ((ret = cdo->generic_packet(cdi, &cgc))) 1681 if ((ret = cdo->generic_packet(cdi, &cgc)))
1740 goto err; 1682 return ret;
1741 break; 1683 break;
1742 1684
1743 /* Get region settings */ 1685 /* Get region settings */
1744 case DVD_LU_SEND_RPC_STATE: 1686 case DVD_LU_SEND_RPC_STATE:
1745 cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n"); 1687 cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n");
1746 setup_report_key(&cgc, 0, 8); 1688 setup_report_key(&cgc, 0, 8);
1689 memset(&rpc_state, 0, sizeof(rpc_state_t));
1690 cgc.buffer = (char *) &rpc_state;
1747 1691
1748 if ((ret = cdo->generic_packet(cdi, &cgc))) 1692 if ((ret = cdo->generic_packet(cdi, &cgc)))
1749 goto err; 1693 return ret;
1750 1694
1751 rpc_state = (rpc_state_t *)buf; 1695 ai->lrpcs.type = rpc_state.type_code;
1752 ai->lrpcs.type = rpc_state->type_code; 1696 ai->lrpcs.vra = rpc_state.vra;
1753 ai->lrpcs.vra = rpc_state->vra; 1697 ai->lrpcs.ucca = rpc_state.ucca;
1754 ai->lrpcs.ucca = rpc_state->ucca; 1698 ai->lrpcs.region_mask = rpc_state.region_mask;
1755 ai->lrpcs.region_mask = rpc_state->region_mask; 1699 ai->lrpcs.rpc_scheme = rpc_state.rpc_scheme;
1756 ai->lrpcs.rpc_scheme = rpc_state->rpc_scheme;
1757 break; 1700 break;
1758 1701
1759 /* Set region settings */ 1702 /* Set region settings */
@@ -1764,23 +1707,20 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1764 buf[4] = ai->hrpcs.pdrc; 1707 buf[4] = ai->hrpcs.pdrc;
1765 1708
1766 if ((ret = cdo->generic_packet(cdi, &cgc))) 1709 if ((ret = cdo->generic_packet(cdi, &cgc)))
1767 goto err; 1710 return ret;
1768 break; 1711 break;
1769 1712
1770 default: 1713 default:
1771 cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type); 1714 cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type);
1772 ret = -ENOTTY; 1715 return -ENOTTY;
1773 goto err;
1774 } 1716 }
1775 ret = 0; 1717
1776err: 1718 return 0;
1777 kfree(buf);
1778 return ret;
1779} 1719}
1780 1720
1781static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s) 1721static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1782{ 1722{
1783 unsigned char *buf, *base; 1723 unsigned char buf[21], *base;
1784 struct dvd_layer *layer; 1724 struct dvd_layer *layer;
1785 struct packet_command cgc; 1725 struct packet_command cgc;
1786 struct cdrom_device_ops *cdo = cdi->ops; 1726 struct cdrom_device_ops *cdo = cdi->ops;
@@ -1789,11 +1729,7 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1789 if (layer_num >= DVD_LAYERS) 1729 if (layer_num >= DVD_LAYERS)
1790 return -EINVAL; 1730 return -EINVAL;
1791 1731
1792 buf = kmalloc(21, GFP_KERNEL); 1732 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1793 if (!buf)
1794 return -ENOMEM;
1795
1796 init_cdrom_command(&cgc, buf, 21, CGC_DATA_READ);
1797 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1733 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1798 cgc.cmd[6] = layer_num; 1734 cgc.cmd[6] = layer_num;
1799 cgc.cmd[7] = s->type; 1735 cgc.cmd[7] = s->type;
@@ -1805,7 +1741,7 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1805 cgc.quiet = 1; 1741 cgc.quiet = 1;
1806 1742
1807 if ((ret = cdo->generic_packet(cdi, &cgc))) 1743 if ((ret = cdo->generic_packet(cdi, &cgc)))
1808 goto err; 1744 return ret;
1809 1745
1810 base = &buf[4]; 1746 base = &buf[4];
1811 layer = &s->physical.layer[layer_num]; 1747 layer = &s->physical.layer[layer_num];
@@ -1829,24 +1765,17 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1829 layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15]; 1765 layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15];
1830 layer->bca = base[16] >> 7; 1766 layer->bca = base[16] >> 7;
1831 1767
1832 ret = 0; 1768 return 0;
1833err:
1834 kfree(buf);
1835 return ret;
1836} 1769}
1837 1770
1838static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s) 1771static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
1839{ 1772{
1840 int ret; 1773 int ret;
1841 u_char *buf; 1774 u_char buf[8];
1842 struct packet_command cgc; 1775 struct packet_command cgc;
1843 struct cdrom_device_ops *cdo = cdi->ops; 1776 struct cdrom_device_ops *cdo = cdi->ops;
1844 1777
1845 buf = kmalloc(8, GFP_KERNEL); 1778 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1846 if (!buf)
1847 return -ENOMEM;
1848
1849 init_cdrom_command(&cgc, buf, 8, CGC_DATA_READ);
1850 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1779 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1851 cgc.cmd[6] = s->copyright.layer_num; 1780 cgc.cmd[6] = s->copyright.layer_num;
1852 cgc.cmd[7] = s->type; 1781 cgc.cmd[7] = s->type;
@@ -1854,15 +1783,12 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
1854 cgc.cmd[9] = cgc.buflen & 0xff; 1783 cgc.cmd[9] = cgc.buflen & 0xff;
1855 1784
1856 if ((ret = cdo->generic_packet(cdi, &cgc))) 1785 if ((ret = cdo->generic_packet(cdi, &cgc)))
1857 goto err; 1786 return ret;
1858 1787
1859 s->copyright.cpst = buf[4]; 1788 s->copyright.cpst = buf[4];
1860 s->copyright.rmi = buf[5]; 1789 s->copyright.rmi = buf[5];
1861 1790
1862 ret = 0; 1791 return 0;
1863err:
1864 kfree(buf);
1865 return ret;
1866} 1792}
1867 1793
1868static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s) 1794static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
@@ -1894,33 +1820,26 @@ static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
1894static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s) 1820static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s)
1895{ 1821{
1896 int ret; 1822 int ret;
1897 u_char *buf; 1823 u_char buf[4 + 188];
1898 struct packet_command cgc; 1824 struct packet_command cgc;
1899 struct cdrom_device_ops *cdo = cdi->ops; 1825 struct cdrom_device_ops *cdo = cdi->ops;
1900 1826
1901 buf = kmalloc(4 + 188, GFP_KERNEL); 1827 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1902 if (!buf)
1903 return -ENOMEM;
1904
1905 init_cdrom_command(&cgc, buf, 4 + 188, CGC_DATA_READ);
1906 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1828 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1907 cgc.cmd[7] = s->type; 1829 cgc.cmd[7] = s->type;
1908 cgc.cmd[9] = cgc.buflen & 0xff; 1830 cgc.cmd[9] = cgc.buflen & 0xff;
1909 1831
1910 if ((ret = cdo->generic_packet(cdi, &cgc))) 1832 if ((ret = cdo->generic_packet(cdi, &cgc)))
1911 goto err; 1833 return ret;
1912 1834
1913 s->bca.len = buf[0] << 8 | buf[1]; 1835 s->bca.len = buf[0] << 8 | buf[1];
1914 if (s->bca.len < 12 || s->bca.len > 188) { 1836 if (s->bca.len < 12 || s->bca.len > 188) {
1915 cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len); 1837 cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
1916 ret = -EIO; 1838 return -EIO;
1917 goto err;
1918 } 1839 }
1919 memcpy(s->bca.value, &buf[4], s->bca.len); 1840 memcpy(s->bca.value, &buf[4], s->bca.len);
1920 ret = 0; 1841
1921err: 1842 return 0;
1922 kfree(buf);
1923 return ret;
1924} 1843}
1925 1844
1926static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s) 1845static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
@@ -2020,13 +1939,9 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
2020{ 1939{
2021 struct cdrom_device_ops *cdo = cdi->ops; 1940 struct cdrom_device_ops *cdo = cdi->ops;
2022 struct packet_command cgc; 1941 struct packet_command cgc;
2023 char *buffer; 1942 char buffer[32];
2024 int ret; 1943 int ret;
2025 1944
2026 buffer = kmalloc(32, GFP_KERNEL);
2027 if (!buffer)
2028 return -ENOMEM;
2029
2030 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); 1945 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
2031 cgc.cmd[0] = GPCMD_READ_SUBCHANNEL; 1946 cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
2032 cgc.cmd[1] = 2; /* MSF addressing */ 1947 cgc.cmd[1] = 2; /* MSF addressing */
@@ -2035,7 +1950,7 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
2035 cgc.cmd[8] = 16; 1950 cgc.cmd[8] = 16;
2036 1951
2037 if ((ret = cdo->generic_packet(cdi, &cgc))) 1952 if ((ret = cdo->generic_packet(cdi, &cgc)))
2038 goto err; 1953 return ret;
2039 1954
2040 subchnl->cdsc_audiostatus = cgc.buffer[1]; 1955 subchnl->cdsc_audiostatus = cgc.buffer[1];
2041 subchnl->cdsc_format = CDROM_MSF; 1956 subchnl->cdsc_format = CDROM_MSF;
@@ -2050,10 +1965,7 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
2050 subchnl->cdsc_absaddr.msf.second = cgc.buffer[10]; 1965 subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
2051 subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11]; 1966 subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
2052 1967
2053 ret = 0; 1968 return 0;
2054err:
2055 kfree(buffer);
2056 return ret;
2057} 1969}
2058 1970
2059/* 1971/*
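Across cdrom.c the commit replaces small kmalloc()/kfree() pairs with on-stack buffers passed to init_cdrom_command(), which in turn removes the err: unwind labels. A minimal sketch of the resulting shape — the GET CONFIGURATION command is used only as an illustration, and field offsets vary per command:

#include <linux/cdrom.h>

static int example_query(struct cdrom_device_info *cdi)
{
	struct packet_command cgc;
	unsigned char buffer[16];	/* was: kmalloc(16, GFP_KERNEL) */
	int ret;

	init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = sizeof(buffer);	/* allocation length tracks the buffer */
	cgc.quiet = 1;

	ret = cdi->ops->generic_packet(cdi, &cgc);
	if (ret)
		return ret;		/* no kfree()/goto err unwinding needed */

	/* ... parse buffer ... */
	return 0;
}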
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d38ac5030763..0e0d12a06462 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -102,7 +102,6 @@ obj-$(CONFIG_TELCLOCK) += tlclk.o
102 102
103obj-$(CONFIG_MWAVE) += mwave/ 103obj-$(CONFIG_MWAVE) += mwave/
104obj-$(CONFIG_AGP) += agp/ 104obj-$(CONFIG_AGP) += agp/
105obj-$(CONFIG_DRM) += drm/
106obj-$(CONFIG_PCMCIA) += pcmcia/ 105obj-$(CONFIG_PCMCIA) += pcmcia/
107obj-$(CONFIG_IPMI_HANDLER) += ipmi/ 106obj-$(CONFIG_IPMI_HANDLER) += ipmi/
108 107
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 13665db363d6..481ffe87c716 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -16,28 +16,9 @@
16#include <asm/page.h> /* PAGE_SIZE */ 16#include <asm/page.h> /* PAGE_SIZE */
17#include <asm/e820.h> 17#include <asm/e820.h>
18#include <asm/k8.h> 18#include <asm/k8.h>
19#include <asm/gart.h>
19#include "agp.h" 20#include "agp.h"
20 21
21/* PTE bits. */
22#define GPTE_VALID 1
23#define GPTE_COHERENT 2
24
25/* Aperture control register bits. */
26#define GARTEN (1<<0)
27#define DISGARTCPU (1<<4)
28#define DISGARTIO (1<<5)
29
30/* GART cache control register bits. */
31#define INVGART (1<<0)
32#define GARTPTEERR (1<<1)
33
34/* K8 On-cpu GART registers */
35#define AMD64_GARTAPERTURECTL 0x90
36#define AMD64_GARTAPERTUREBASE 0x94
37#define AMD64_GARTTABLEBASE 0x98
38#define AMD64_GARTCACHECTL 0x9c
39#define AMD64_GARTEN (1<<0)
40
41/* NVIDIA K8 registers */ 22/* NVIDIA K8 registers */
42#define NVIDIA_X86_64_0_APBASE 0x10 23#define NVIDIA_X86_64_0_APBASE 0x10
43#define NVIDIA_X86_64_1_APBASE1 0x50 24#define NVIDIA_X86_64_1_APBASE1 0x50
@@ -165,29 +146,18 @@ static int amd64_fetch_size(void)
165 * In a multiprocessor x86-64 system, this function gets 146 * In a multiprocessor x86-64 system, this function gets
166 * called once for each CPU. 147 * called once for each CPU.
167 */ 148 */
168static u64 amd64_configure (struct pci_dev *hammer, u64 gatt_table) 149static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
169{ 150{
170 u64 aperturebase; 151 u64 aperturebase;
171 u32 tmp; 152 u32 tmp;
172 u64 addr, aper_base; 153 u64 aper_base;
173 154
174 /* Address to map to */ 155 /* Address to map to */
175 pci_read_config_dword (hammer, AMD64_GARTAPERTUREBASE, &tmp); 156 pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
176 aperturebase = tmp << 25; 157 aperturebase = tmp << 25;
177 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); 158 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
178 159
179 /* address of the mappings table */ 160 enable_gart_translation(hammer, gatt_table);
180 addr = (u64) gatt_table;
181 addr >>= 12;
182 tmp = (u32) addr<<4;
183 tmp &= ~0xf;
184 pci_write_config_dword (hammer, AMD64_GARTTABLEBASE, tmp);
185
186 /* Enable GART translation for this hammer. */
187 pci_read_config_dword(hammer, AMD64_GARTAPERTURECTL, &tmp);
188 tmp |= GARTEN;
189 tmp &= ~(DISGARTCPU | DISGARTIO);
190 pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp);
191 161
192 return aper_base; 162 return aper_base;
193} 163}
@@ -226,9 +196,9 @@ static void amd64_cleanup(void)
226 for (i = 0; i < num_k8_northbridges; i++) { 196 for (i = 0; i < num_k8_northbridges; i++) {
227 struct pci_dev *dev = k8_northbridges[i]; 197 struct pci_dev *dev = k8_northbridges[i];
228 /* disable gart translation */ 198 /* disable gart translation */
229 pci_read_config_dword (dev, AMD64_GARTAPERTURECTL, &tmp); 199 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
230 tmp &= ~AMD64_GARTEN; 200 tmp &= ~AMD64_GARTEN;
231 pci_write_config_dword (dev, AMD64_GARTAPERTURECTL, tmp); 201 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
232 } 202 }
233} 203}
234 204
@@ -258,24 +228,10 @@ static const struct agp_bridge_driver amd_8151_driver = {
258}; 228};
259 229
260/* Some basic sanity checks for the aperture. */ 230/* Some basic sanity checks for the aperture. */
261static int __devinit aperture_valid(u64 aper, u32 size) 231static int __devinit agp_aperture_valid(u64 aper, u32 size)
262{ 232{
263 if (aper == 0) { 233 if (!aperture_valid(aper, size, 32*1024*1024))
264 printk(KERN_ERR PFX "No aperture\n");
265 return 0;
266 }
267 if (size < 32*1024*1024) {
268 printk(KERN_ERR PFX "Aperture too small (%d MB)\n", size>>20);
269 return 0;
270 }
271 if ((u64)aper + size > 0x100000000ULL) {
272 printk(KERN_ERR PFX "Aperture out of bounds\n");
273 return 0; 234 return 0;
274 }
275 if (e820_any_mapped(aper, aper + size, E820_RAM)) {
276 printk(KERN_ERR PFX "Aperture pointing to RAM\n");
277 return 0;
278 }
279 235
280 /* Request the Aperture. This catches cases when someone else 236 /* Request the Aperture. This catches cases when someone else
281 already put a mapping in there - happens with some very broken BIOS 237 already put a mapping in there - happens with some very broken BIOS
@@ -308,11 +264,11 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
308 u32 nb_order, nb_base; 264 u32 nb_order, nb_base;
309 u16 apsize; 265 u16 apsize;
310 266
311 pci_read_config_dword(nb, 0x90, &nb_order); 267 pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
312 nb_order = (nb_order >> 1) & 7; 268 nb_order = (nb_order >> 1) & 7;
313 pci_read_config_dword(nb, 0x94, &nb_base); 269 pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
314 nb_aper = nb_base << 25; 270 nb_aper = nb_base << 25;
315 if (aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) { 271 if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) {
316 return 0; 272 return 0;
317 } 273 }
318 274
@@ -331,12 +287,23 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
         pci_read_config_dword(agp, 0x10, &aper_low);
         pci_read_config_dword(agp, 0x14, &aper_hi);
         aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
+
+        /*
+         * On some sick chips APSIZE is 0. This means it wants 4G
+         * so let double check that order, and lets trust the AMD NB settings
+         */
+        if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
+                printk(KERN_INFO "Aperture size %u MB is not right, using settings from NB\n",
+                       32 << order);
+                order = nb_order;
+        }
+
         printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order);
-        if (order < 0 || !aperture_valid(aper, (32*1024*1024)<<order))
+        if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
                 return -1;
 
-        pci_write_config_dword(nb, 0x90, order << 1);
-        pci_write_config_dword(nb, 0x94, aper >> 25);
+        pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1);
+        pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
 
         return 0;
 }
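The aperture size is always 32 MB shifted left by the reported order, so the new sanity check above asks whether base + (32 MB << order) would cross the 4 GB boundary; if APSIZE decodes to a nonsensical order (some bridges report 0, i.e. 4 GB), the northbridge's own order is used instead. A small illustrative helper using the same arithmetic as the added lines (values in the comment are hypothetical):

/* Would this order place the aperture end past 4GB? (illustration only) */
static int aperture_crosses_4g(u64 aper_base, int order)
{
        /* 32MB << order, expressed in bytes as 32ULL << (20 + order) */
        return order >= 0 && aper_base + (32ULL << (20 + order)) > 0x100000000ULL;
}

For example, a base of 0xd0000000 with order 3 (256 MB) ends at 0xe0000000 and passes, while the same base with order 7 (4 GB) overflows and triggers the fallback to nb_order.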
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index e6cb1ab03e06..a96f3197e60f 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -39,6 +39,7 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/sched.h>
+#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include "agp.h"
@@ -677,6 +678,7 @@ static int agp_open(struct inode *inode, struct file *file)
         struct agp_client *client;
         int rc = -ENXIO;
 
+        lock_kernel();
         mutex_lock(&(agp_fe.agp_mutex));
 
         if (minor != AGPGART_MINOR)
@@ -703,12 +705,14 @@ static int agp_open(struct inode *inode, struct file *file)
         agp_insert_file_private(priv);
         DBG("private=%p, client=%p", priv, client);
         mutex_unlock(&(agp_fe.agp_mutex));
+        unlock_kernel();
         return 0;
 
 err_out_nomem:
         rc = -ENOMEM;
 err_out:
         mutex_unlock(&(agp_fe.agp_mutex));
+        unlock_kernel();
         return rc;
 }
 
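The new smp_lock.h include and the lock_kernel()/unlock_kernel() pair in agp_open() follow the pattern repeated throughout this series: the VFS no longer takes the Big Kernel Lock around ->open(), so every character-device open handler that still depends on that ordering now takes and releases the BKL itself. A generic sketch of the pattern, with a hypothetical driver open routine standing in for the real body:

#include <linux/smp_lock.h>

static int example_open(struct inode *inode, struct file *file)
{
        int ret;

        lock_kernel();                  /* what the VFS used to hold for us */
        ret = example_do_open(file);    /* hypothetical: the driver's real open work */
        unlock_kernel();

        return ret;
}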
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index cdd876dbb2b0..da8a1658a273 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/poll.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/miscdevice.h>
@@ -416,6 +417,7 @@ static int apm_open(struct inode * inode, struct file * filp)
 {
         struct apm_user *as;
 
+        lock_kernel();
         as = kzalloc(sizeof(*as), GFP_KERNEL);
         if (as) {
                 /*
@@ -435,6 +437,7 @@ static int apm_open(struct inode * inode, struct file * filp)
 
                 filp->private_data = as;
         }
+        unlock_kernel();
 
         return as ? 0 : -ENOMEM;
 }
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
index b6f2639f903d..d8cff909001c 100644
--- a/drivers/char/briq_panel.c
+++ b/drivers/char/briq_panel.c
@@ -6,6 +6,7 @@
 
 #include <linux/module.h>
 
+#include <linux/smp_lock.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/tty.h>
@@ -67,11 +68,15 @@ static void set_led(char state)
 
 static int briq_panel_open(struct inode *ino, struct file *filep)
 {
-        /* enforce single access */
-        if (vfd_is_open)
+        lock_kernel();
+        /* enforce single access, vfd_is_open is protected by BKL */
+        if (vfd_is_open) {
+                unlock_kernel();
                 return -EBUSY;
+        }
         vfd_is_open = 1;
 
+        unlock_kernel();
         return 0;
 }
 
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index c0a4a0bb509e..04ba906b4880 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -17,6 +17,7 @@
 #include <linux/cdev.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
+#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -157,6 +158,7 @@ static int cs5535_gpio_open(struct inode *inode, struct file *file)
 {
         u32 m = iminor(inode);
 
+        cycle_kernel_lock();
         /* the mask says which pins are usable by this driver */
         if ((mask & (1 << m)) == 0)
                 return -EINVAL;
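cs5535_gpio_open() gets cycle_kernel_lock() rather than a full lock/unlock pair: the driver does not need to hold the BKL across its own checks, it only has to wait out any initialization still running under the BKL. Conceptually the helper just takes the lock and immediately drops it again; a sketch of that idea (the real declaration lives in linux/smp_lock.h):

/* Sketch: synchronize with any BKL-holding init path, then carry on unlocked. */
static inline void cycle_kernel_lock_sketch(void)
{
        lock_kernel();
        unlock_kernel();
}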
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
deleted file mode 100644
index 1283ded88ead..000000000000
--- a/drivers/char/drm/Makefile
+++ /dev/null
@@ -1,40 +0,0 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
6 drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
7 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
8 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
9 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
10
11tdfx-objs := tdfx_drv.o
12r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
13mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
14i810-objs := i810_drv.o i810_dma.o
15i830-objs := i830_drv.o i830_dma.o i830_irq.o
16i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
17radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
18sis-objs := sis_drv.o sis_mm.o
19savage-objs := savage_drv.o savage_bci.o savage_state.o
20via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
21
22ifeq ($(CONFIG_COMPAT),y)
23drm-objs += drm_ioc32.o
24radeon-objs += radeon_ioc32.o
25mga-objs += mga_ioc32.o
26r128-objs += r128_ioc32.o
27i915-objs += i915_ioc32.o
28endif
29
30obj-$(CONFIG_DRM) += drm.o
31obj-$(CONFIG_DRM_TDFX) += tdfx.o
32obj-$(CONFIG_DRM_R128) += r128.o
33obj-$(CONFIG_DRM_RADEON)+= radeon.o
34obj-$(CONFIG_DRM_MGA) += mga.o
35obj-$(CONFIG_DRM_I810) += i810.o
36obj-$(CONFIG_DRM_I830) += i830.o
37obj-$(CONFIG_DRM_I915) += i915.o
38obj-$(CONFIG_DRM_SIS) += sis.o
39obj-$(CONFIG_DRM_SAVAGE)+= savage.o
40obj-$(CONFIG_DRM_VIA) +=via.o
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
deleted file mode 100644
index 38d3c6b8276a..000000000000
--- a/drivers/char/drm/drm.h
+++ /dev/null
@@ -1,694 +0,0 @@
1/**
2 * \file drm.h
3 * Header for the Direct Rendering Manager
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 *
7 * \par Acknowledgments:
8 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
9 */
10
11/*
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All rights reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#ifndef _DRM_H_
37#define _DRM_H_
38
39#if defined(__linux__)
40#if defined(__KERNEL__)
41#endif
42#include <asm/ioctl.h> /* For _IO* macros */
43#define DRM_IOCTL_NR(n) _IOC_NR(n)
44#define DRM_IOC_VOID _IOC_NONE
45#define DRM_IOC_READ _IOC_READ
46#define DRM_IOC_WRITE _IOC_WRITE
47#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
48#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
49#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
50#if defined(__FreeBSD__) && defined(IN_MODULE)
51/* Prevent name collision when including sys/ioccom.h */
52#undef ioctl
53#include <sys/ioccom.h>
54#define ioctl(a,b,c) xf86ioctl(a,b,c)
55#else
56#include <sys/ioccom.h>
57#endif /* __FreeBSD__ && xf86ioctl */
58#define DRM_IOCTL_NR(n) ((n) & 0xff)
59#define DRM_IOC_VOID IOC_VOID
60#define DRM_IOC_READ IOC_OUT
61#define DRM_IOC_WRITE IOC_IN
62#define DRM_IOC_READWRITE IOC_INOUT
63#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
64#endif
65
66#define DRM_MAJOR 226
67#define DRM_MAX_MINOR 15
68
69#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
70#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
71#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
72#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
73
74#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
75#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
76#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
77#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
78#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
79
80typedef unsigned int drm_handle_t;
81typedef unsigned int drm_context_t;
82typedef unsigned int drm_drawable_t;
83typedef unsigned int drm_magic_t;
84
85/**
86 * Cliprect.
87 *
88 * \warning: If you change this structure, make sure you change
89 * XF86DRIClipRectRec in the server as well
90 *
91 * \note KW: Actually it's illegal to change either for
92 * backwards-compatibility reasons.
93 */
94struct drm_clip_rect {
95 unsigned short x1;
96 unsigned short y1;
97 unsigned short x2;
98 unsigned short y2;
99};
100
101/**
102 * Drawable information.
103 */
104struct drm_drawable_info {
105 unsigned int num_rects;
106 struct drm_clip_rect *rects;
107};
108
109/**
110 * Texture region,
111 */
112struct drm_tex_region {
113 unsigned char next;
114 unsigned char prev;
115 unsigned char in_use;
116 unsigned char padding;
117 unsigned int age;
118};
119
120/**
121 * Hardware lock.
122 *
123 * The lock structure is a simple cache-line aligned integer. To avoid
124 * processor bus contention on a multiprocessor system, there should not be any
125 * other data stored in the same cache line.
126 */
127struct drm_hw_lock {
128 __volatile__ unsigned int lock; /**< lock variable */
129 char padding[60]; /**< Pad to cache line */
130};
131
132/**
133 * DRM_IOCTL_VERSION ioctl argument type.
134 *
135 * \sa drmGetVersion().
136 */
137struct drm_version {
138 int version_major; /**< Major version */
139 int version_minor; /**< Minor version */
140 int version_patchlevel; /**< Patch level */
141 size_t name_len; /**< Length of name buffer */
142 char __user *name; /**< Name of driver */
143 size_t date_len; /**< Length of date buffer */
144 char __user *date; /**< User-space buffer to hold date */
145 size_t desc_len; /**< Length of desc buffer */
146 char __user *desc; /**< User-space buffer to hold desc */
147};
148
149/**
150 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
151 *
152 * \sa drmGetBusid() and drmSetBusId().
153 */
154struct drm_unique {
155 size_t unique_len; /**< Length of unique */
156 char __user *unique; /**< Unique name for driver instantiation */
157};
158
159struct drm_list {
160 int count; /**< Length of user-space structures */
161 struct drm_version __user *version;
162};
163
164struct drm_block {
165 int unused;
166};
167
168/**
169 * DRM_IOCTL_CONTROL ioctl argument type.
170 *
171 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
172 */
173struct drm_control {
174 enum {
175 DRM_ADD_COMMAND,
176 DRM_RM_COMMAND,
177 DRM_INST_HANDLER,
178 DRM_UNINST_HANDLER
179 } func;
180 int irq;
181};
182
183/**
184 * Type of memory to map.
185 */
186enum drm_map_type {
187 _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
188 _DRM_REGISTERS = 1, /**< no caching, no core dump */
189 _DRM_SHM = 2, /**< shared, cached */
190 _DRM_AGP = 3, /**< AGP/GART */
191 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
192 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
193};
194
195/**
196 * Memory mapping flags.
197 */
198enum drm_map_flags {
199 _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
200 _DRM_READ_ONLY = 0x02,
201 _DRM_LOCKED = 0x04, /**< shared, cached, locked */
202 _DRM_KERNEL = 0x08, /**< kernel requires access */
203 _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
204 _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
205 _DRM_REMOVABLE = 0x40, /**< Removable mapping */
206 _DRM_DRIVER = 0x80 /**< Managed by driver */
207};
208
209struct drm_ctx_priv_map {
210 unsigned int ctx_id; /**< Context requesting private mapping */
211 void *handle; /**< Handle of map */
212};
213
214/**
215 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
216 * argument type.
217 *
218 * \sa drmAddMap().
219 */
220struct drm_map {
221 unsigned long offset; /**< Requested physical address (0 for SAREA)*/
222 unsigned long size; /**< Requested physical size (bytes) */
223 enum drm_map_type type; /**< Type of memory to map */
224 enum drm_map_flags flags; /**< Flags */
225 void *handle; /**< User-space: "Handle" to pass to mmap() */
226 /**< Kernel-space: kernel-virtual address */
227 int mtrr; /**< MTRR slot used */
228 /* Private data */
229};
230
231/**
232 * DRM_IOCTL_GET_CLIENT ioctl argument type.
233 */
234struct drm_client {
235 int idx; /**< Which client desired? */
236 int auth; /**< Is client authenticated? */
237 unsigned long pid; /**< Process ID */
238 unsigned long uid; /**< User ID */
239 unsigned long magic; /**< Magic */
240 unsigned long iocs; /**< Ioctl count */
241};
242
243enum drm_stat_type {
244 _DRM_STAT_LOCK,
245 _DRM_STAT_OPENS,
246 _DRM_STAT_CLOSES,
247 _DRM_STAT_IOCTLS,
248 _DRM_STAT_LOCKS,
249 _DRM_STAT_UNLOCKS,
250 _DRM_STAT_VALUE, /**< Generic value */
251 _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
252 _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
253
254 _DRM_STAT_IRQ, /**< IRQ */
255 _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
256 _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
257 _DRM_STAT_DMA, /**< DMA */
258 _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
259 _DRM_STAT_MISSED /**< Missed DMA opportunity */
260 /* Add to the *END* of the list */
261};
262
263/**
264 * DRM_IOCTL_GET_STATS ioctl argument type.
265 */
266struct drm_stats {
267 unsigned long count;
268 struct {
269 unsigned long value;
270 enum drm_stat_type type;
271 } data[15];
272};
273
274/**
275 * Hardware locking flags.
276 */
277enum drm_lock_flags {
278 _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
279 _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
280 _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
281 _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
282 /* These *HALT* flags aren't supported yet
283 -- they will be used to support the
284 full-screen DGA-like mode. */
285 _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
286 _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
287};
288
289/**
290 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
291 *
292 * \sa drmGetLock() and drmUnlock().
293 */
294struct drm_lock {
295 int context;
296 enum drm_lock_flags flags;
297};
298
299/**
300 * DMA flags
301 *
302 * \warning
303 * These values \e must match xf86drm.h.
304 *
305 * \sa drm_dma.
306 */
307enum drm_dma_flags {
308 /* Flags for DMA buffer dispatch */
309 _DRM_DMA_BLOCK = 0x01, /**<
310 * Block until buffer dispatched.
311 *
312 * \note The buffer may not yet have
313 * been processed by the hardware --
314 * getting a hardware lock with the
315 * hardware quiescent will ensure
316 * that the buffer has been
317 * processed.
318 */
319 _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
320 _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
321
322 /* Flags for DMA buffer request */
323 _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
324 _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
325 _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
326};
327
328/**
329 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
330 *
331 * \sa drmAddBufs().
332 */
333struct drm_buf_desc {
334 int count; /**< Number of buffers of this size */
335 int size; /**< Size in bytes */
336 int low_mark; /**< Low water mark */
337 int high_mark; /**< High water mark */
338 enum {
339 _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
340 _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
341 _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
342 _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
343 _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
344 } flags;
345 unsigned long agp_start; /**<
346 * Start address of where the AGP buffers are
347 * in the AGP aperture
348 */
349};
350
351/**
352 * DRM_IOCTL_INFO_BUFS ioctl argument type.
353 */
354struct drm_buf_info {
355 int count; /**< Entries in list */
356 struct drm_buf_desc __user *list;
357};
358
359/**
360 * DRM_IOCTL_FREE_BUFS ioctl argument type.
361 */
362struct drm_buf_free {
363 int count;
364 int __user *list;
365};
366
367/**
368 * Buffer information
369 *
370 * \sa drm_buf_map.
371 */
372struct drm_buf_pub {
373 int idx; /**< Index into the master buffer list */
374 int total; /**< Buffer size */
375 int used; /**< Amount of buffer in use (for DMA) */
376 void __user *address; /**< Address of buffer */
377};
378
379/**
380 * DRM_IOCTL_MAP_BUFS ioctl argument type.
381 */
382struct drm_buf_map {
383 int count; /**< Length of the buffer list */
384 void __user *virtual; /**< Mmap'd area in user-virtual */
385 struct drm_buf_pub __user *list; /**< Buffer information */
386};
387
388/**
389 * DRM_IOCTL_DMA ioctl argument type.
390 *
391 * Indices here refer to the offset into the buffer list in drm_buf_get.
392 *
393 * \sa drmDMA().
394 */
395struct drm_dma {
396 int context; /**< Context handle */
397 int send_count; /**< Number of buffers to send */
398 int __user *send_indices; /**< List of handles to buffers */
399 int __user *send_sizes; /**< Lengths of data to send */
400 enum drm_dma_flags flags; /**< Flags */
401 int request_count; /**< Number of buffers requested */
402 int request_size; /**< Desired size for buffers */
403 int __user *request_indices; /**< Buffer information */
404 int __user *request_sizes;
405 int granted_count; /**< Number of buffers granted */
406};
407
408enum drm_ctx_flags {
409 _DRM_CONTEXT_PRESERVED = 0x01,
410 _DRM_CONTEXT_2DONLY = 0x02
411};
412
413/**
414 * DRM_IOCTL_ADD_CTX ioctl argument type.
415 *
416 * \sa drmCreateContext() and drmDestroyContext().
417 */
418struct drm_ctx {
419 drm_context_t handle;
420 enum drm_ctx_flags flags;
421};
422
423/**
424 * DRM_IOCTL_RES_CTX ioctl argument type.
425 */
426struct drm_ctx_res {
427 int count;
428 struct drm_ctx __user *contexts;
429};
430
431/**
432 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
433 */
434struct drm_draw {
435 drm_drawable_t handle;
436};
437
438/**
439 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
440 */
441typedef enum {
442 DRM_DRAWABLE_CLIPRECTS,
443} drm_drawable_info_type_t;
444
445struct drm_update_draw {
446 drm_drawable_t handle;
447 unsigned int type;
448 unsigned int num;
449 unsigned long long data;
450};
451
452/**
453 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
454 */
455struct drm_auth {
456 drm_magic_t magic;
457};
458
459/**
460 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
461 *
462 * \sa drmGetInterruptFromBusID().
463 */
464struct drm_irq_busid {
465 int irq; /**< IRQ number */
466 int busnum; /**< bus number */
467 int devnum; /**< device number */
468 int funcnum; /**< function number */
469};
470
471enum drm_vblank_seq_type {
472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
474 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
475 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
476 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
477};
478
479#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
480#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
481 _DRM_VBLANK_NEXTONMISS)
482
483struct drm_wait_vblank_request {
484 enum drm_vblank_seq_type type;
485 unsigned int sequence;
486 unsigned long signal;
487};
488
489struct drm_wait_vblank_reply {
490 enum drm_vblank_seq_type type;
491 unsigned int sequence;
492 long tval_sec;
493 long tval_usec;
494};
495
496/**
497 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
498 *
499 * \sa drmWaitVBlank().
500 */
501union drm_wait_vblank {
502 struct drm_wait_vblank_request request;
503 struct drm_wait_vblank_reply reply;
504};
505
506/**
507 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
508 *
509 * \sa drmAgpEnable().
510 */
511struct drm_agp_mode {
512 unsigned long mode; /**< AGP mode */
513};
514
515/**
516 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
517 *
518 * \sa drmAgpAlloc() and drmAgpFree().
519 */
520struct drm_agp_buffer {
521 unsigned long size; /**< In bytes -- will round to page boundary */
522 unsigned long handle; /**< Used for binding / unbinding */
523 unsigned long type; /**< Type of memory to allocate */
524 unsigned long physical; /**< Physical used by i810 */
525};
526
527/**
528 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
529 *
530 * \sa drmAgpBind() and drmAgpUnbind().
531 */
532struct drm_agp_binding {
533 unsigned long handle; /**< From drm_agp_buffer */
534 unsigned long offset; /**< In bytes -- will round to page boundary */
535};
536
537/**
538 * DRM_IOCTL_AGP_INFO ioctl argument type.
539 *
540 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
541 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
542 * drmAgpVendorId() and drmAgpDeviceId().
543 */
544struct drm_agp_info {
545 int agp_version_major;
546 int agp_version_minor;
547 unsigned long mode;
548 unsigned long aperture_base; /* physical address */
549 unsigned long aperture_size; /* bytes */
550 unsigned long memory_allowed; /* bytes */
551 unsigned long memory_used;
552
553 /* PCI information */
554 unsigned short id_vendor;
555 unsigned short id_device;
556};
557
558/**
559 * DRM_IOCTL_SG_ALLOC ioctl argument type.
560 */
561struct drm_scatter_gather {
562 unsigned long size; /**< In bytes -- will round to page boundary */
563 unsigned long handle; /**< Used for mapping / unmapping */
564};
565
566/**
567 * DRM_IOCTL_SET_VERSION ioctl argument type.
568 */
569struct drm_set_version {
570 int drm_di_major;
571 int drm_di_minor;
572 int drm_dd_major;
573 int drm_dd_minor;
574};
575
576#define DRM_IOCTL_BASE 'd'
577#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
578#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
579#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
580#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
581
582#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
583#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
584#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
585#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
586#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
587#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
588#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
589#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
590
591#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
592#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
593#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
594#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
595#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
596#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
597#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
598#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
599#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
600#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
601#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
602
603#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
604
605#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
606#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
607
608#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
609#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
610#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
611#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
612#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
613#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
614#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
615#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
616#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
617#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
618#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
619#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
620#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
621
622#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
623#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
624#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
625#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
626#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
627#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
628#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
629#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
630
631#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
632#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
633
634#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
635
636#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
637
638/**
639 * Device specific ioctls should only be in their respective headers
640 * The device specific ioctl range is from 0x40 to 0x99.
641 * Generic IOCTLS restart at 0xA0.
642 *
643 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
644 * drmCommandReadWrite().
645 */
646#define DRM_COMMAND_BASE 0x40
647#define DRM_COMMAND_END 0xA0
648
649/* typedef area */
650#ifndef __KERNEL__
651typedef struct drm_clip_rect drm_clip_rect_t;
652typedef struct drm_drawable_info drm_drawable_info_t;
653typedef struct drm_tex_region drm_tex_region_t;
654typedef struct drm_hw_lock drm_hw_lock_t;
655typedef struct drm_version drm_version_t;
656typedef struct drm_unique drm_unique_t;
657typedef struct drm_list drm_list_t;
658typedef struct drm_block drm_block_t;
659typedef struct drm_control drm_control_t;
660typedef enum drm_map_type drm_map_type_t;
661typedef enum drm_map_flags drm_map_flags_t;
662typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
663typedef struct drm_map drm_map_t;
664typedef struct drm_client drm_client_t;
665typedef enum drm_stat_type drm_stat_type_t;
666typedef struct drm_stats drm_stats_t;
667typedef enum drm_lock_flags drm_lock_flags_t;
668typedef struct drm_lock drm_lock_t;
669typedef enum drm_dma_flags drm_dma_flags_t;
670typedef struct drm_buf_desc drm_buf_desc_t;
671typedef struct drm_buf_info drm_buf_info_t;
672typedef struct drm_buf_free drm_buf_free_t;
673typedef struct drm_buf_pub drm_buf_pub_t;
674typedef struct drm_buf_map drm_buf_map_t;
675typedef struct drm_dma drm_dma_t;
676typedef union drm_wait_vblank drm_wait_vblank_t;
677typedef struct drm_agp_mode drm_agp_mode_t;
678typedef enum drm_ctx_flags drm_ctx_flags_t;
679typedef struct drm_ctx drm_ctx_t;
680typedef struct drm_ctx_res drm_ctx_res_t;
681typedef struct drm_draw drm_draw_t;
682typedef struct drm_update_draw drm_update_draw_t;
683typedef struct drm_auth drm_auth_t;
684typedef struct drm_irq_busid drm_irq_busid_t;
685typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
686
687typedef struct drm_agp_buffer drm_agp_buffer_t;
688typedef struct drm_agp_binding drm_agp_binding_t;
689typedef struct drm_agp_info drm_agp_info_t;
690typedef struct drm_scatter_gather drm_scatter_gather_t;
691typedef struct drm_set_version drm_set_version_t;
692#endif
693
694#endif
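struct drm_version above is the classic two-call ioctl: user space issues DRM_IOCTL_VERSION once with NULL buffers to learn the string lengths, allocates them, then repeats the call so the kernel can fill in the name, date and description. A hedged user-space sketch of that pattern (error handling trimmed; assumes the definitions from this header are visible):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int query_drm_version(int fd, struct drm_version *v)
{
        memset(v, 0, sizeof(*v));
        if (ioctl(fd, DRM_IOCTL_VERSION, v))            /* first pass: lengths only */
                return -1;

        v->name = malloc(v->name_len + 1);
        v->date = malloc(v->date_len + 1);
        v->desc = malloc(v->desc_len + 1);
        if (!v->name || !v->date || !v->desc)
                return -1;

        return ioctl(fd, DRM_IOCTL_VERSION, v);         /* second pass: fill buffers */
}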
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
deleted file mode 100644
index 0764b662b339..000000000000
--- a/drivers/char/drm/drmP.h
+++ /dev/null
@@ -1,1153 +0,0 @@
1/**
2 * \file drmP.h
3 * Private header for Direct Rendering Manager
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * All rights reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
31 * OTHER DEALINGS IN THE SOFTWARE.
32 */
33
34#ifndef _DRM_P_H_
35#define _DRM_P_H_
36
37/* If you want the memory alloc debug functionality, change define below */
38/* #define DEBUG_MEMORY */
39
40#ifdef __KERNEL__
41#ifdef __alpha__
42/* add include of current.h so that "current" is defined
43 * before static inline funcs in wait.h. Doing this so we
44 * can build the DRM (part of PI DRI). 4/21/2000 S + B */
45#include <asm/current.h>
46#endif /* __alpha__ */
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/miscdevice.h>
50#include <linux/fs.h>
51#include <linux/proc_fs.h>
52#include <linux/init.h>
53#include <linux/file.h>
54#include <linux/pci.h>
55#include <linux/jiffies.h>
56#include <linux/smp_lock.h> /* For (un)lock_kernel */
57#include <linux/dma-mapping.h>
58#include <linux/mm.h>
59#include <linux/cdev.h>
60#include <linux/mutex.h>
61#if defined(__alpha__) || defined(__powerpc__)
62#include <asm/pgtable.h> /* For pte_wrprotect */
63#endif
64#include <asm/io.h>
65#include <asm/mman.h>
66#include <asm/uaccess.h>
67#ifdef CONFIG_MTRR
68#include <asm/mtrr.h>
69#endif
70#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
71#include <linux/types.h>
72#include <linux/agp_backend.h>
73#endif
74#include <linux/workqueue.h>
75#include <linux/poll.h>
76#include <asm/pgalloc.h>
77#include "drm.h"
78
79#include <linux/idr.h>
80
81#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
82#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
83
84struct drm_file;
85struct drm_device;
86
87#include "drm_os_linux.h"
88#include "drm_hashtab.h"
89
90/***********************************************************************/
91/** \name DRM template customization defaults */
92/*@{*/
93
94/* driver capabilities and requirements mask */
95#define DRIVER_USE_AGP 0x1
96#define DRIVER_REQUIRE_AGP 0x2
97#define DRIVER_USE_MTRR 0x4
98#define DRIVER_PCI_DMA 0x8
99#define DRIVER_SG 0x10
100#define DRIVER_HAVE_DMA 0x20
101#define DRIVER_HAVE_IRQ 0x40
102#define DRIVER_IRQ_SHARED 0x80
103#define DRIVER_IRQ_VBL 0x100
104#define DRIVER_DMA_QUEUE 0x200
105#define DRIVER_FB_DMA 0x400
106#define DRIVER_IRQ_VBL2 0x800
107
108/***********************************************************************/
109/** \name Begin the DRM... */
110/*@{*/
111
112#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
113 also include looping detection. */
114
115#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
116#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
117#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
118#define DRM_LOOPING_LIMIT 5000000
119#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */
120#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */
121
122#define DRM_FLAG_DEBUG 0x01
123
124#define DRM_MEM_DMA 0
125#define DRM_MEM_SAREA 1
126#define DRM_MEM_DRIVER 2
127#define DRM_MEM_MAGIC 3
128#define DRM_MEM_IOCTLS 4
129#define DRM_MEM_MAPS 5
130#define DRM_MEM_VMAS 6
131#define DRM_MEM_BUFS 7
132#define DRM_MEM_SEGS 8
133#define DRM_MEM_PAGES 9
134#define DRM_MEM_FILES 10
135#define DRM_MEM_QUEUES 11
136#define DRM_MEM_CMDS 12
137#define DRM_MEM_MAPPINGS 13
138#define DRM_MEM_BUFLISTS 14
139#define DRM_MEM_AGPLISTS 15
140#define DRM_MEM_TOTALAGP 16
141#define DRM_MEM_BOUNDAGP 17
142#define DRM_MEM_CTXBITMAP 18
143#define DRM_MEM_STUB 19
144#define DRM_MEM_SGLISTS 20
145#define DRM_MEM_CTXLIST 21
146#define DRM_MEM_MM 22
147#define DRM_MEM_HASHTAB 23
148
149#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
150#define DRM_MAP_HASH_OFFSET 0x10000000
151
152/*@}*/
153
154/***********************************************************************/
155/** \name Macros to make printk easier */
156/*@{*/
157
158/**
159 * Error output.
160 *
161 * \param fmt printf() like format string.
162 * \param arg arguments
163 */
164#define DRM_ERROR(fmt, arg...) \
165 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
166
167/**
168 * Memory error output.
169 *
170 * \param area memory area where the error occurred.
171 * \param fmt printf() like format string.
172 * \param arg arguments
173 */
174#define DRM_MEM_ERROR(area, fmt, arg...) \
175 printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
176 drm_mem_stats[area].name , ##arg)
177
178#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
179
180/**
181 * Debug output.
182 *
183 * \param fmt printf() like format string.
184 * \param arg arguments
185 */
186#if DRM_DEBUG_CODE
187#define DRM_DEBUG(fmt, arg...) \
188 do { \
189 if ( drm_debug ) \
190 printk(KERN_DEBUG \
191 "[" DRM_NAME ":%s] " fmt , \
192 __func__ , ##arg); \
193 } while (0)
194#else
195#define DRM_DEBUG(fmt, arg...) do { } while (0)
196#endif
197
198#define DRM_PROC_LIMIT (PAGE_SIZE-80)
199
200#define DRM_PROC_PRINT(fmt, arg...) \
201 len += sprintf(&buf[len], fmt , ##arg); \
202 if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
203
204#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
205 len += sprintf(&buf[len], fmt , ##arg); \
206 if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
207
208/*@}*/
209
210/***********************************************************************/
211/** \name Internal types and structures */
212/*@{*/
213
214#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
215
216#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
217#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
218#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
219
220#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
221/**
222 * Get the private SAREA mapping.
223 *
224 * \param _dev DRM device.
225 * \param _ctx context number.
226 * \param _map output mapping.
227 */
228#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
229 (_map) = (_dev)->context_sareas[_ctx]; \
230} while(0)
231
232/**
233 * Test that the hardware lock is held by the caller, returning otherwise.
234 *
235 * \param dev DRM device.
236 * \param filp file pointer of the caller.
237 */
238#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \
239do { \
240 if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
241 dev->lock.file_priv != file_priv ) { \
242 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
243 __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
244 dev->lock.file_priv, file_priv ); \
245 return -EINVAL; \
246 } \
247} while (0)
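LOCK_TEST_WITH_RETURN() is meant to sit at the top of any driver ioctl that touches the ring or DMA engine: it returns -EINVAL unless the calling file currently holds the hardware lock. A hedged sketch of typical use, with an illustrative ioctl body that is not from this tree:

static int example_dma_flush(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        /* refuse to touch the hardware unless this client owns the HW lock */
        LOCK_TEST_WITH_RETURN(dev, file_priv);

        /* ... hypothetical: kick the engine and wait for idle ... */
        return 0;
}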
248
249/**
250 * Copy and IOCTL return string to user space
251 */
252#define DRM_COPY( name, value ) \
253 len = strlen( value ); \
254 if ( len > name##_len ) len = name##_len; \
255 name##_len = strlen( value ); \
256 if ( len && name ) { \
257 if ( copy_to_user( name, value, len ) ) \
258 return -EFAULT; \
259 }
260
261/**
262 * Ioctl function type.
263 *
264 * \param inode device inode.
265 * \param file_priv DRM file private pointer.
266 * \param cmd command.
267 * \param arg argument.
268 */
269typedef int drm_ioctl_t(struct drm_device *dev, void *data,
270 struct drm_file *file_priv);
271
272typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
273 unsigned long arg);
274
275#define DRM_AUTH 0x1
276#define DRM_MASTER 0x2
277#define DRM_ROOT_ONLY 0x4
278
279struct drm_ioctl_desc {
280 unsigned int cmd;
281 drm_ioctl_t *func;
282 int flags;
283};
284
285/**
286 * Creates a driver or general drm_ioctl_desc array entry for the given
287 * ioctl, for use by drm_ioctl().
288 */
289#define DRM_IOCTL_DEF(ioctl, func, flags) \
290 [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
291
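DRM_IOCTL_DEF() produces a designated-initializer entry indexed by the ioctl number, so a driver's table lines up with the numbers user space passes to drm_ioctl(). A sketch of such a table using generic ioctls declared elsewhere in this header; the permission flags shown are illustrative, not the values any particular driver uses:

static struct drm_ioctl_desc example_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};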
292struct drm_magic_entry {
293 struct list_head head;
294 struct drm_hash_item hash_item;
295 struct drm_file *priv;
296};
297
298struct drm_vma_entry {
299 struct list_head head;
300 struct vm_area_struct *vma;
301 pid_t pid;
302};
303
304/**
305 * DMA buffer.
306 */
307struct drm_buf {
308 int idx; /**< Index into master buflist */
309 int total; /**< Buffer size */
310 int order; /**< log-base-2(total) */
311 int used; /**< Amount of buffer in use (for DMA) */
312 unsigned long offset; /**< Byte offset (used internally) */
313 void *address; /**< Address of buffer */
314 unsigned long bus_address; /**< Bus address of buffer */
315 struct drm_buf *next; /**< Kernel-only: used for free list */
316 __volatile__ int waiting; /**< On kernel DMA queue */
317 __volatile__ int pending; /**< On hardware DMA queue */
318 wait_queue_head_t dma_wait; /**< Processes waiting */
319 struct drm_file *file_priv; /**< Private of holding file descr */
320 int context; /**< Kernel queue for this buffer */
321 int while_locked; /**< Dispatch this buffer while locked */
322 enum {
323 DRM_LIST_NONE = 0,
324 DRM_LIST_FREE = 1,
325 DRM_LIST_WAIT = 2,
326 DRM_LIST_PEND = 3,
327 DRM_LIST_PRIO = 4,
328 DRM_LIST_RECLAIM = 5
329 } list; /**< Which list we're on */
330
331 int dev_priv_size; /**< Size of buffer private storage */
332 void *dev_private; /**< Per-buffer private storage */
333};
334
335/** bufs is one longer than it has to be */
336struct drm_waitlist {
337 int count; /**< Number of possible buffers */
338 struct drm_buf **bufs; /**< List of pointers to buffers */
339 struct drm_buf **rp; /**< Read pointer */
340 struct drm_buf **wp; /**< Write pointer */
341 struct drm_buf **end; /**< End pointer */
342 spinlock_t read_lock;
343 spinlock_t write_lock;
344};
345
346struct drm_freelist {
347 int initialized; /**< Freelist in use */
348 atomic_t count; /**< Number of free buffers */
349 struct drm_buf *next; /**< End pointer */
350
351 wait_queue_head_t waiting; /**< Processes waiting on free bufs */
352 int low_mark; /**< Low water mark */
353 int high_mark; /**< High water mark */
354 atomic_t wfh; /**< If waiting for high mark */
355 spinlock_t lock;
356};
357
358typedef struct drm_dma_handle {
359 dma_addr_t busaddr;
360 void *vaddr;
361 size_t size;
362} drm_dma_handle_t;
363
364/**
365 * Buffer entry. There is one of this for each buffer size order.
366 */
367struct drm_buf_entry {
368 int buf_size; /**< size */
369 int buf_count; /**< number of buffers */
370 struct drm_buf *buflist; /**< buffer list */
371 int seg_count;
372 int page_order;
373 struct drm_dma_handle **seglist;
374
375 struct drm_freelist freelist;
376};
377
378/** File private data */
379struct drm_file {
380 int authenticated;
381 int master;
382 pid_t pid;
383 uid_t uid;
384 drm_magic_t magic;
385 unsigned long ioctl_count;
386 struct list_head lhead;
387 struct drm_minor *minor;
388 int remove_auth_on_close;
389 unsigned long lock_count;
390 struct file *filp;
391 void *driver_priv;
392};
393
394/** Wait queue */
395struct drm_queue {
396 atomic_t use_count; /**< Outstanding uses (+1) */
397 atomic_t finalization; /**< Finalization in progress */
398 atomic_t block_count; /**< Count of processes waiting */
399 atomic_t block_read; /**< Queue blocked for reads */
400 wait_queue_head_t read_queue; /**< Processes waiting on block_read */
401 atomic_t block_write; /**< Queue blocked for writes */
402 wait_queue_head_t write_queue; /**< Processes waiting on block_write */
403 atomic_t total_queued; /**< Total queued statistic */
404 atomic_t total_flushed; /**< Total flushes statistic */
405 atomic_t total_locks; /**< Total locks statistics */
406 enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
407 struct drm_waitlist waitlist; /**< Pending buffers */
408 wait_queue_head_t flush_queue; /**< Processes waiting until flush */
409};
410
411/**
412 * Lock data.
413 */
414struct drm_lock_data {
415 struct drm_hw_lock *hw_lock; /**< Hardware lock */
416 /** Private of lock holder's file (NULL=kernel) */
417 struct drm_file *file_priv;
418 wait_queue_head_t lock_queue; /**< Queue of blocked processes */
419 unsigned long lock_time; /**< Time of last lock in jiffies */
420 spinlock_t spinlock;
421 uint32_t kernel_waiters;
422 uint32_t user_waiters;
423 int idle_has_lock;
424};
425
426/**
427 * DMA data.
428 */
429struct drm_device_dma {
430
431 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
432 int buf_count; /**< total number of buffers */
433 struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
434 int seg_count;
435 int page_count; /**< number of pages */
436 unsigned long *pagelist; /**< page list */
437 unsigned long byte_count;
438 enum {
439 _DRM_DMA_USE_AGP = 0x01,
440 _DRM_DMA_USE_SG = 0x02,
441 _DRM_DMA_USE_FB = 0x04,
442 _DRM_DMA_USE_PCI_RO = 0x08
443 } flags;
444
445};
446
447/**
448 * AGP memory entry. Stored as a doubly linked list.
449 */
450struct drm_agp_mem {
451 unsigned long handle; /**< handle */
452 DRM_AGP_MEM *memory;
453 unsigned long bound; /**< address */
454 int pages;
455 struct list_head head;
456};
457
458/**
459 * AGP data.
460 *
461 * \sa drm_agp_init() and drm_device::agp.
462 */
463struct drm_agp_head {
464 DRM_AGP_KERN agp_info; /**< AGP device information */
465 struct list_head memory;
466 unsigned long mode; /**< AGP mode */
467 struct agp_bridge_data *bridge;
468 int enabled; /**< whether the AGP bus as been enabled */
469 int acquired; /**< whether the AGP device has been acquired */
470 unsigned long base;
471 int agp_mtrr;
472 int cant_use_aperture;
473 unsigned long page_mask;
474};
475
476/**
477 * Scatter-gather memory.
478 */
479struct drm_sg_mem {
480 unsigned long handle;
481 void *virtual;
482 int pages;
483 struct page **pagelist;
484 dma_addr_t *busaddr;
485};
486
487struct drm_sigdata {
488 int context;
489 struct drm_hw_lock *lock;
490};
491
492
493/*
494 * Generic memory manager structs
495 */
496
497struct drm_mm_node {
498 struct list_head fl_entry;
499 struct list_head ml_entry;
500 int free;
501 unsigned long start;
502 unsigned long size;
503 struct drm_mm *mm;
504 void *private;
505};
506
507struct drm_mm {
508 struct list_head fl_entry;
509 struct list_head ml_entry;
510};
511
512
513/**
514 * Mappings list
515 */
516struct drm_map_list {
517 struct list_head head; /**< list head */
518 struct drm_hash_item hash;
519 struct drm_map *map; /**< mapping */
520 uint64_t user_token;
521};
522
523typedef struct drm_map drm_local_map_t;
524
525/**
526 * Context handle list
527 */
528struct drm_ctx_list {
529 struct list_head head; /**< list head */
530 drm_context_t handle; /**< context handle */
531 struct drm_file *tag; /**< associated fd private data */
532};
533
534struct drm_vbl_sig {
535 struct list_head head;
536 unsigned int sequence;
537 struct siginfo info;
538 struct task_struct *task;
539};
540
541/* location of GART table */
542#define DRM_ATI_GART_MAIN 1
543#define DRM_ATI_GART_FB 2
544
545#define DRM_ATI_GART_PCI 1
546#define DRM_ATI_GART_PCIE 2
547#define DRM_ATI_GART_IGP 3
548
549struct drm_ati_pcigart_info {
550 int gart_table_location;
551 int gart_reg_if;
552 void *addr;
553 dma_addr_t bus_addr;
554 dma_addr_t table_mask;
555 struct drm_dma_handle *table_handle;
556 drm_local_map_t mapping;
557 int table_size;
558};
559
560/**
561 * DRM driver structure. This structure represent the common code for
562 * a family of cards. There will one drm_device for each card present
563 * in this family
564 */
565struct drm_driver {
566 int (*load) (struct drm_device *, unsigned long flags);
567 int (*firstopen) (struct drm_device *);
568 int (*open) (struct drm_device *, struct drm_file *);
569 void (*preclose) (struct drm_device *, struct drm_file *file_priv);
570 void (*postclose) (struct drm_device *, struct drm_file *);
571 void (*lastclose) (struct drm_device *);
572 int (*unload) (struct drm_device *);
573 int (*suspend) (struct drm_device *, pm_message_t state);
574 int (*resume) (struct drm_device *);
575 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
576 void (*dma_ready) (struct drm_device *);
577 int (*dma_quiescent) (struct drm_device *);
578 int (*context_ctor) (struct drm_device *dev, int context);
579 int (*context_dtor) (struct drm_device *dev, int context);
580 int (*kernel_context_switch) (struct drm_device *dev, int old,
581 int new);
582 void (*kernel_context_switch_unlock) (struct drm_device *dev);
583 int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
584 int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
585 int (*dri_library_name) (struct drm_device *dev, char *buf);
586
587 /**
588 * Called by \c drm_device_is_agp. Typically used to determine if a
589 * card is really attached to AGP or not.
590 *
591 * \param dev DRM device handle
592 *
593 * \returns
594 * One of three values is returned depending on whether or not the
595 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
596 * (return of 1), or may or may not be AGP (return of 2).
597 */
598 int (*device_is_agp) (struct drm_device *dev);
599
600 /* these have to be filled in */
601
602 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
603 void (*irq_preinstall) (struct drm_device *dev);
604 void (*irq_postinstall) (struct drm_device *dev);
605 void (*irq_uninstall) (struct drm_device *dev);
606 void (*reclaim_buffers) (struct drm_device *dev,
607 struct drm_file * file_priv);
608 void (*reclaim_buffers_locked) (struct drm_device *dev,
609 struct drm_file *file_priv);
610 void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
611 struct drm_file *file_priv);
612 unsigned long (*get_map_ofs) (struct drm_map * map);
613 unsigned long (*get_reg_ofs) (struct drm_device *dev);
614 void (*set_version) (struct drm_device *dev,
615 struct drm_set_version *sv);
616
617 int major;
618 int minor;
619 int patchlevel;
620 char *name;
621 char *desc;
622 char *date;
623
624 u32 driver_features;
625 int dev_priv_size;
626 struct drm_ioctl_desc *ioctls;
627 int num_ioctls;
628 struct file_operations fops;
629 struct pci_driver pci_driver;
630};
631
632#define DRM_MINOR_UNASSIGNED 0
633#define DRM_MINOR_LEGACY 1
634
635/**
636 * DRM minor structure. This structure represents a drm minor number.
637 */
638struct drm_minor {
639 int index; /**< Minor device number */
640 int type; /**< Control or render */
641 dev_t device; /**< Device number for mknod */
642 struct device kdev; /**< Linux device */
643 struct drm_device *dev;
644 struct proc_dir_entry *dev_root; /**< proc directory entry */
645};
646
647/**
648 * DRM device structure. This structure represent a complete card that
649 * may contain multiple heads.
650 */
651struct drm_device {
652 char *unique; /**< Unique identifier: e.g., busid */
653 int unique_len; /**< Length of unique field */
654 char *devname; /**< For /proc/interrupts */
655 int if_version; /**< Highest interface version set */
656
657 int blocked; /**< Blocked due to VC switch? */
658
659 /** \name Locks */
660 /*@{ */
661 spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
662 struct mutex struct_mutex; /**< For others */
663 /*@} */
664
665 /** \name Usage Counters */
666 /*@{ */
667 int open_count; /**< Outstanding files open */
668 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
669 atomic_t vma_count; /**< Outstanding vma areas open */
670 int buf_use; /**< Buffers in use -- cannot alloc */
671 atomic_t buf_alloc; /**< Buffer allocation in progress */
672 /*@} */
673
674 /** \name Performance counters */
675 /*@{ */
676 unsigned long counters;
677 enum drm_stat_type types[15];
678 atomic_t counts[15];
679 /*@} */
680
681 /** \name Authentication */
682 /*@{ */
683 struct list_head filelist;
684 struct drm_open_hash magiclist; /**< magic hash table */
685 struct list_head magicfree;
686 /*@} */
687
688 /** \name Memory management */
689 /*@{ */
690 struct list_head maplist; /**< Linked list of regions */
691 int map_count; /**< Number of mappable regions */
692 struct drm_open_hash map_hash; /**< User token hash table for maps */
693
694 /** \name Context handle management */
695 /*@{ */
696 struct list_head ctxlist; /**< Linked list of context handles */
697 int ctx_count; /**< Number of context handles */
698 struct mutex ctxlist_mutex; /**< For ctxlist */
699
700 struct idr ctx_idr;
701
702 struct list_head vmalist; /**< List of vmas (for debugging) */
703 struct drm_lock_data lock; /**< Information on hardware lock */
704 /*@} */
705
706 /** \name DMA queues (contexts) */
707 /*@{ */
708 int queue_count; /**< Number of active DMA queues */
709 int queue_reserved; /**< Number of reserved DMA queues */
710 int queue_slots; /**< Actual length of queuelist */
711 struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
712 struct drm_device_dma *dma; /**< Optional pointer for DMA support */
713 /*@} */
714
715 /** \name Context support */
716 /*@{ */
717 int irq; /**< Interrupt used by board */
718 int irq_enabled; /**< True if irq handler is enabled */
719 __volatile__ long context_flag; /**< Context swapping flag */
720 __volatile__ long interrupt_flag; /**< Interruption handler flag */
721 __volatile__ long dma_flag; /**< DMA dispatch flag */
722 struct timer_list timer; /**< Timer for delaying ctx switch */
723 wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
724 int last_checked; /**< Last context checked for DMA */
725 int last_context; /**< Last current context */
726 unsigned long last_switch; /**< jiffies at last context switch */
727 /*@} */
728
729 struct work_struct work;
730 /** \name VBLANK IRQ support */
731 /*@{ */
732
733 wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
734 atomic_t vbl_received;
735 atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
736 spinlock_t vbl_lock;
737 struct list_head vbl_sigs; /**< signal list to send on VBLANK */
738 struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
739 unsigned int vbl_pending;
740 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
741 void (*locked_tasklet_func)(struct drm_device *dev);
742
743 /*@} */
744 cycles_t ctx_start;
745 cycles_t lck_start;
746
747 struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
748 wait_queue_head_t buf_readers; /**< Processes waiting to read */
749 wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
750
751 struct drm_agp_head *agp; /**< AGP data */
752
753 struct pci_dev *pdev; /**< PCI device structure */
754 int pci_vendor; /**< PCI vendor id */
755 int pci_device; /**< PCI device id */
756#ifdef __alpha__
757 struct pci_controller *hose;
758#endif
759 struct drm_sg_mem *sg; /**< Scatter gather memory */
760 void *dev_private; /**< device private data */
761 struct drm_sigdata sigdata; /**< For block_all_signals */
762 sigset_t sigmask;
763
764 struct drm_driver *driver;
765 drm_local_map_t *agp_buffer_map;
766 unsigned int agp_buffer_token;
767 struct drm_minor *primary; /**< render type primary screen head */
768
769 /** \name Drawable information */
770 /*@{ */
771 spinlock_t drw_lock;
772 struct idr drw_idr;
773 /*@} */
774};
775
776static __inline__ int drm_core_check_feature(struct drm_device *dev,
777 int feature)
778{
779 return ((dev->driver->driver_features & feature) ? 1 : 0);
780}
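drm_core_check_feature() simply tests driver_features, the capability mask each driver builds from the DRIVER_* flags defined near the top of this header. A hedged sketch of how a driver advertises features and how core code then gates optional paths on them (driver and function names are hypothetical):

static struct drm_driver example_driver = {
        .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR |
                           DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
        /* ... fops, pci_driver, load/unload hooks, ioctl table, ... */
};

static int example_post_load(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                /* set up the DMA buffer machinery (illustrative) */
        }
        return 0;
}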
781
782#ifdef __alpha__
783#define drm_get_pci_domain(dev) dev->hose->index
784#else
785#define drm_get_pci_domain(dev) 0
786#endif
787
788#if __OS_HAS_AGP
789static inline int drm_core_has_AGP(struct drm_device *dev)
790{
791 return drm_core_check_feature(dev, DRIVER_USE_AGP);
792}
793#else
794#define drm_core_has_AGP(dev) (0)
795#endif
796
797#if __OS_HAS_MTRR
798static inline int drm_core_has_MTRR(struct drm_device *dev)
799{
800 return drm_core_check_feature(dev, DRIVER_USE_MTRR);
801}
802
803#define DRM_MTRR_WC MTRR_TYPE_WRCOMB
804
805static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
806 unsigned int flags)
807{
808 return mtrr_add(offset, size, flags, 1);
809}
810
811static inline int drm_mtrr_del(int handle, unsigned long offset,
812 unsigned long size, unsigned int flags)
813{
814 return mtrr_del(handle, offset, size);
815}
816
817#else
818#define drm_core_has_MTRR(dev) (0)
819
820#define DRM_MTRR_WC 0
821
822static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
823 unsigned int flags)
824{
825 return 0;
826}
827
828static inline int drm_mtrr_del(int handle, unsigned long offset,
829 unsigned long size, unsigned int flags)
830{
831 return 0;
832}
833#endif
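The drm_mtrr_add()/drm_mtrr_del() wrappers above compile to no-ops when the kernel has no MTRR support, so callers can mark a framebuffer or AGP aperture write-combining unconditionally. A hedged sketch of the usual call pair (base and size are whatever the driver discovered for its aperture):

static int example_map_aperture(unsigned long base, unsigned long size)
{
        int handle;

        /* ask for write-combining over the aperture; harmless no-op without MTRRs */
        handle = drm_mtrr_add(base, size, DRM_MTRR_WC);

        /* ... use the aperture ... */

        drm_mtrr_del(handle, base, size, DRM_MTRR_WC);
        return 0;
}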
834
835/******************************************************************/
836/** \name Internal function definitions */
837/*@{*/
838
839 /* Driver support (drm_drv.h) */
840extern int drm_init(struct drm_driver *driver);
841extern void drm_exit(struct drm_driver *driver);
842extern int drm_ioctl(struct inode *inode, struct file *filp,
843 unsigned int cmd, unsigned long arg);
844extern long drm_compat_ioctl(struct file *filp,
845 unsigned int cmd, unsigned long arg);
846extern int drm_lastclose(struct drm_device *dev);
847
848 /* Device support (drm_fops.h) */
849extern int drm_open(struct inode *inode, struct file *filp);
850extern int drm_stub_open(struct inode *inode, struct file *filp);
851extern int drm_fasync(int fd, struct file *filp, int on);
852extern int drm_release(struct inode *inode, struct file *filp);
853
854 /* Mapping support (drm_vm.h) */
855extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
856extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
857extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
858extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
859
860 /* Memory management support (drm_memory.h) */
861#include "drm_memory.h"
862extern void drm_mem_init(void);
863extern int drm_mem_info(char *buf, char **start, off_t offset,
864 int request, int *eof, void *data);
865extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
866
867extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
868extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
869extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
870extern int drm_unbind_agp(DRM_AGP_MEM * handle);
871
872 /* Misc. IOCTL support (drm_ioctl.h) */
873extern int drm_irq_by_busid(struct drm_device *dev, void *data,
874 struct drm_file *file_priv);
875extern int drm_getunique(struct drm_device *dev, void *data,
876 struct drm_file *file_priv);
877extern int drm_setunique(struct drm_device *dev, void *data,
878 struct drm_file *file_priv);
879extern int drm_getmap(struct drm_device *dev, void *data,
880 struct drm_file *file_priv);
881extern int drm_getclient(struct drm_device *dev, void *data,
882 struct drm_file *file_priv);
883extern int drm_getstats(struct drm_device *dev, void *data,
884 struct drm_file *file_priv);
885extern int drm_setversion(struct drm_device *dev, void *data,
886 struct drm_file *file_priv);
887extern int drm_noop(struct drm_device *dev, void *data,
888 struct drm_file *file_priv);
889
890 /* Context IOCTL support (drm_context.h) */
891extern int drm_resctx(struct drm_device *dev, void *data,
892 struct drm_file *file_priv);
893extern int drm_addctx(struct drm_device *dev, void *data,
894 struct drm_file *file_priv);
895extern int drm_modctx(struct drm_device *dev, void *data,
896 struct drm_file *file_priv);
897extern int drm_getctx(struct drm_device *dev, void *data,
898 struct drm_file *file_priv);
899extern int drm_switchctx(struct drm_device *dev, void *data,
900 struct drm_file *file_priv);
901extern int drm_newctx(struct drm_device *dev, void *data,
902 struct drm_file *file_priv);
903extern int drm_rmctx(struct drm_device *dev, void *data,
904 struct drm_file *file_priv);
905
906extern int drm_ctxbitmap_init(struct drm_device *dev);
907extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
908extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
909
910extern int drm_setsareactx(struct drm_device *dev, void *data,
911 struct drm_file *file_priv);
912extern int drm_getsareactx(struct drm_device *dev, void *data,
913 struct drm_file *file_priv);
914
915 /* Drawable IOCTL support (drm_drawable.h) */
916extern int drm_adddraw(struct drm_device *dev, void *data,
917 struct drm_file *file_priv);
918extern int drm_rmdraw(struct drm_device *dev, void *data,
919 struct drm_file *file_priv);
920extern int drm_update_drawable_info(struct drm_device *dev, void *data,
921 struct drm_file *file_priv);
922extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
923 drm_drawable_t id);
924extern void drm_drawable_free_all(struct drm_device *dev);
925
926 /* Authentication IOCTL support (drm_auth.h) */
927extern int drm_getmagic(struct drm_device *dev, void *data,
928 struct drm_file *file_priv);
929extern int drm_authmagic(struct drm_device *dev, void *data,
930 struct drm_file *file_priv);
931
932 /* Locking IOCTL support (drm_lock.h) */
933extern int drm_lock(struct drm_device *dev, void *data,
934 struct drm_file *file_priv);
935extern int drm_unlock(struct drm_device *dev, void *data,
936 struct drm_file *file_priv);
937extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
938extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
939extern void drm_idlelock_take(struct drm_lock_data *lock_data);
940extern void drm_idlelock_release(struct drm_lock_data *lock_data);
941
942/*
943 * These are exported to drivers so that they can implement fencing using
 944 * DMA quiescent + idle. DMA quiescence usually requires the hardware lock.
945 */
946
947extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
948
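A minimal, hedged sketch of the pattern described above (the quiesce body is driver-specific and omitted; only drm_i_have_hw_lock() comes from this header):

/* Sketch: refuse to quiesce DMA unless the caller holds the hardware lock. */
static int example_dma_quiesce(struct drm_device *dev, struct drm_file *file_priv)
{
	if (!drm_i_have_hw_lock(dev, file_priv))
		return -EINVAL;
	/* ... driver-specific wait-for-engine-idle would go here ... */
	return 0;
}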
949 /* Buffer management support (drm_bufs.h) */
950extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
951extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
952extern int drm_addmap(struct drm_device *dev, unsigned int offset,
953 unsigned int size, enum drm_map_type type,
954 enum drm_map_flags flags, drm_local_map_t ** map_ptr);
955extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
956 struct drm_file *file_priv);
957extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
958extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
959extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
960 struct drm_file *file_priv);
961extern int drm_addbufs(struct drm_device *dev, void *data,
962 struct drm_file *file_priv);
963extern int drm_infobufs(struct drm_device *dev, void *data,
964 struct drm_file *file_priv);
965extern int drm_markbufs(struct drm_device *dev, void *data,
966 struct drm_file *file_priv);
967extern int drm_freebufs(struct drm_device *dev, void *data,
968 struct drm_file *file_priv);
969extern int drm_mapbufs(struct drm_device *dev, void *data,
970 struct drm_file *file_priv);
971extern int drm_order(unsigned long size);
972extern unsigned long drm_get_resource_start(struct drm_device *dev,
973 unsigned int resource);
974extern unsigned long drm_get_resource_len(struct drm_device *dev,
975 unsigned int resource);
976
977 /* DMA support (drm_dma.h) */
978extern int drm_dma_setup(struct drm_device *dev);
979extern void drm_dma_takedown(struct drm_device *dev);
980extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
981extern void drm_core_reclaim_buffers(struct drm_device *dev,
982 struct drm_file *filp);
983
984 /* IRQ support (drm_irq.h) */
985extern int drm_control(struct drm_device *dev, void *data,
986 struct drm_file *file_priv);
987extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
988extern int drm_irq_uninstall(struct drm_device *dev);
989extern void drm_driver_irq_preinstall(struct drm_device *dev);
990extern void drm_driver_irq_postinstall(struct drm_device *dev);
991extern void drm_driver_irq_uninstall(struct drm_device *dev);
992
993extern int drm_wait_vblank(struct drm_device *dev, void *data,
994 struct drm_file *file_priv);
995extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
996extern void drm_vbl_send_signals(struct drm_device *dev);
997extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
998
999 /* AGP/GART support (drm_agpsupport.h) */
1000extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
1001extern int drm_agp_acquire(struct drm_device *dev);
1002extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
1003 struct drm_file *file_priv);
1004extern int drm_agp_release(struct drm_device *dev);
1005extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
1006 struct drm_file *file_priv);
1007extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
1008extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
1009 struct drm_file *file_priv);
1010extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
1011extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
1012 struct drm_file *file_priv);
1013extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
1014extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
1015 struct drm_file *file_priv);
1016extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
1017extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
1018 struct drm_file *file_priv);
1019extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
1020extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
1021 struct drm_file *file_priv);
1022extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
1023extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
1024 struct drm_file *file_priv);
1025extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
1026extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
1027extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
1028extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
1029
1030 /* Stub support (drm_stub.h) */
1031extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
1032 struct drm_driver *driver);
1033extern int drm_put_dev(struct drm_device *dev);
1034extern int drm_put_minor(struct drm_minor **minor);
1035extern unsigned int drm_debug;
1036
1037extern struct class *drm_class;
1038extern struct proc_dir_entry *drm_proc_root;
1039
1040extern struct idr drm_minors_idr;
1041
1042extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
1043
1044 /* Proc support (drm_proc.h) */
1045extern int drm_proc_init(struct drm_minor *minor, int minor_id,
1046 struct proc_dir_entry *root);
1047extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
1048
1049 /* Scatter Gather Support (drm_scatter.h) */
1050extern void drm_sg_cleanup(struct drm_sg_mem * entry);
1051extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
1052 struct drm_file *file_priv);
1053extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
1054extern int drm_sg_free(struct drm_device *dev, void *data,
1055 struct drm_file *file_priv);
1056
1057 /* ATI PCIGART support (ati_pcigart.h) */
1058extern int drm_ati_pcigart_init(struct drm_device *dev,
1059 struct drm_ati_pcigart_info * gart_info);
1060extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
1061 struct drm_ati_pcigart_info * gart_info);
1062
1063extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
1064 size_t align, dma_addr_t maxaddr);
1065extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1066extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1067
1068 /* sysfs support (drm_sysfs.c) */
1069struct drm_sysfs_class;
1070extern struct class *drm_sysfs_create(struct module *owner, char *name);
1071extern void drm_sysfs_destroy(void);
1072extern int drm_sysfs_device_add(struct drm_minor *minor);
1073extern void drm_sysfs_device_remove(struct drm_minor *minor);
1074
1075/*
1076 * Basic memory manager support (drm_mm.c)
1077 */
1078extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
1079 unsigned long size,
1080 unsigned alignment);
1081extern void drm_mm_put_block(struct drm_mm_node * cur);
1082extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
1083 unsigned alignment, int best_match);
1084extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
1085extern void drm_mm_takedown(struct drm_mm *mm);
1086extern int drm_mm_clean(struct drm_mm *mm);
1087extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
1088extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
1089extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
1090
1091extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
1092extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
1093
1094static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
1095 unsigned int token)
1096{
1097 struct drm_map_list *_entry;
1098 list_for_each_entry(_entry, &dev->maplist, head)
1099 if (_entry->user_token == token)
1100 return _entry->map;
1101 return NULL;
1102}
1103
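For illustration (hedged; the wrapper is invented), an ioctl handler that receives a user-space map token resolves it back to the kernel mapping with drm_core_findmap() before touching it:

/* Sketch: translate a user-visible token into the registered drm_map. */
static struct drm_map *example_map_lookup(struct drm_device *dev, unsigned int token)
{
	struct drm_map *map = drm_core_findmap(dev, token);

	if (map == NULL)
		DRM_DEBUG("no map registered for token 0x%x\n", token);
	return map;
}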
1104static __inline__ int drm_device_is_agp(struct drm_device *dev)
1105{
1106 if (dev->driver->device_is_agp != NULL) {
1107 int err = (*dev->driver->device_is_agp) (dev);
1108
1109 if (err != 2) {
1110 return err;
1111 }
1112 }
1113
1114 return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
1115}
1116
1117static __inline__ int drm_device_is_pcie(struct drm_device *dev)
1118{
1119 return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
1120}
1121
1122static __inline__ void drm_core_dropmap(struct drm_map *map)
1123{
1124}
1125
1126#ifndef DEBUG_MEMORY
1127/** Wrapper around kmalloc() */
1128static __inline__ void *drm_alloc(size_t size, int area)
1129{
1130 return kmalloc(size, GFP_KERNEL);
1131}
1132
1133/** Wrapper around kfree() */
1134static __inline__ void drm_free(void *pt, size_t size, int area)
1135{
1136 kfree(pt);
1137}
1138
1139/** Wrapper around kcalloc() */
1140static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area)
1141{
1142 return kcalloc(nmemb, size, GFP_KERNEL);
1143}
1144#else
1145extern void *drm_alloc(size_t size, int area);
1146extern void drm_free(void *pt, size_t size, int area);
1147extern void *drm_calloc(size_t nmemb, size_t size, int area);
1148#endif
1149
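As a hedged usage note, callers pass one of the DRM_MEM_* area constants so that a DEBUG_MEMORY build can account the allocation per pool; in the plain build the area argument is ignored, as the wrappers above show:

/* Sketch: paired allocate/free through the DRM wrappers. */
static int example_scratch(void)
{
	void *buf = drm_alloc(PAGE_SIZE, DRM_MEM_DRIVER);

	if (buf == NULL)
		return -ENOMEM;
	/* ... use buf ... */
	drm_free(buf, PAGE_SIZE, DRM_MEM_DRIVER);
	return 0;
}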
1150/*@}*/
1151
1152#endif /* __KERNEL__ */
1153#endif
diff --git a/drivers/char/drm/drm_core.h b/drivers/char/drm/drm_core.h
deleted file mode 100644
index 316739036079..000000000000
--- a/drivers/char/drm/drm_core.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
24
25#define CORE_NAME "drm"
26#define CORE_DESC "DRM shared core routines"
27#define CORE_DATE "20060810"
28
29#define DRM_IF_MAJOR 1
30#define DRM_IF_MINOR 3
31
32#define CORE_MAJOR 1
33#define CORE_MINOR 1
34#define CORE_PATCHLEVEL 0
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
deleted file mode 100644
index cd2b189e1be6..000000000000
--- a/drivers/char/drm/drm_hashtab.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/**************************************************************************
2 *
 3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND, USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
 29 * Simple open hash table implementation.
30 *
31 * Authors:
32 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33 */
34
35#ifndef DRM_HASHTAB_H
36#define DRM_HASHTAB_H
37
38#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
39
40struct drm_hash_item {
41 struct hlist_node head;
42 unsigned long key;
43};
44
45struct drm_open_hash {
46 unsigned int size;
47 unsigned int order;
48 unsigned int fill;
49 struct hlist_head *table;
50 int use_vmalloc;
51};
52
53
54extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
55extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
56extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
57 unsigned long seed, int bits, int shift,
58 unsigned long add);
59extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
60
61extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
62extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
63extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
64extern void drm_ht_remove(struct drm_open_hash *ht);
65
66
67#endif
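A hedged sketch of how this interface is meant to be used: embed a struct drm_hash_item in the object, insert it under a key, and recover the container on lookup with drm_hash_entry(). struct example_obj and the demo function are invented for illustration:

/* Sketch only: typical open-hash usage with the helpers declared above. */
struct example_obj {
	struct drm_hash_item hash;	/* embedded hash linkage */
	int payload;
};

static int example_ht_demo(struct drm_open_hash *ht,
			   struct example_obj *obj, unsigned long key)
{
	struct drm_hash_item *item;

	obj->hash.key = key;
	if (drm_ht_insert_item(ht, &obj->hash))	/* non-zero on failure */
		return -EINVAL;

	if (drm_ht_find_item(ht, key, &item) == 0)
		return drm_hash_entry(item, struct example_obj, hash)->payload;
	return -ENOENT;
}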
diff --git a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h
deleted file mode 100644
index 63e425b5ea82..000000000000
--- a/drivers/char/drm/drm_memory.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/**
2 * \file drm_memory.h
3 * Memory management wrappers for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/highmem.h>
37#include <linux/vmalloc.h>
38#include "drmP.h"
39
40/**
41 * Cut down version of drm_memory_debug.h, which used to be called
42 * drm_memory.h.
43 */
44
45#if __OS_HAS_AGP
46
47#include <linux/vmalloc.h>
48
49#ifdef HAVE_PAGE_AGP
50#include <asm/agp.h>
51#else
52# ifdef __powerpc__
53# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
54# else
55# define PAGE_AGP PAGE_KERNEL
56# endif
57#endif
58
59#else /* __OS_HAS_AGP */
60
61#endif
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
deleted file mode 100644
index 6463271deea8..000000000000
--- a/drivers/char/drm/drm_memory_debug.h
+++ /dev/null
@@ -1,309 +0,0 @@
1/**
2 * \file drm_memory_debug.h
3 * Memory management wrappers for DRM.
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
31 * OTHER DEALINGS IN THE SOFTWARE.
32 */
33
34#include "drmP.h"
35
36typedef struct drm_mem_stats {
37 const char *name;
38 int succeed_count;
39 int free_count;
40 int fail_count;
41 unsigned long bytes_allocated;
42 unsigned long bytes_freed;
43} drm_mem_stats_t;
44
45static DEFINE_SPINLOCK(drm_mem_lock);
46static unsigned long drm_ram_available = 0; /* In pages */
47static unsigned long drm_ram_used = 0;
48static drm_mem_stats_t drm_mem_stats[] =
49{
50 [DRM_MEM_DMA] = {"dmabufs"},
51 [DRM_MEM_SAREA] = {"sareas"},
52 [DRM_MEM_DRIVER] = {"driver"},
53 [DRM_MEM_MAGIC] = {"magic"},
54 [DRM_MEM_IOCTLS] = {"ioctltab"},
55 [DRM_MEM_MAPS] = {"maplist"},
56 [DRM_MEM_VMAS] = {"vmalist"},
57 [DRM_MEM_BUFS] = {"buflist"},
58 [DRM_MEM_SEGS] = {"seglist"},
59 [DRM_MEM_PAGES] = {"pagelist"},
60 [DRM_MEM_FILES] = {"files"},
61 [DRM_MEM_QUEUES] = {"queues"},
62 [DRM_MEM_CMDS] = {"commands"},
63 [DRM_MEM_MAPPINGS] = {"mappings"},
64 [DRM_MEM_BUFLISTS] = {"buflists"},
65 [DRM_MEM_AGPLISTS] = {"agplist"},
66 [DRM_MEM_SGLISTS] = {"sglist"},
67 [DRM_MEM_TOTALAGP] = {"totalagp"},
68 [DRM_MEM_BOUNDAGP] = {"boundagp"},
69 [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
70 [DRM_MEM_CTXLIST] = {"ctxlist"},
71 [DRM_MEM_STUB] = {"stub"},
72 {NULL, 0,} /* Last entry must be null */
73};
74
75void drm_mem_init (void) {
76 drm_mem_stats_t *mem;
77 struct sysinfo si;
78
79 for (mem = drm_mem_stats; mem->name; ++mem) {
80 mem->succeed_count = 0;
81 mem->free_count = 0;
82 mem->fail_count = 0;
83 mem->bytes_allocated = 0;
84 mem->bytes_freed = 0;
85 }
86
87 si_meminfo(&si);
88 drm_ram_available = si.totalram;
89 drm_ram_used = 0;
90}
91
92/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
93
94static int drm__mem_info (char *buf, char **start, off_t offset,
95 int request, int *eof, void *data) {
96 drm_mem_stats_t *pt;
97 int len = 0;
98
99 if (offset > DRM_PROC_LIMIT) {
100 *eof = 1;
101 return 0;
102 }
103
104 *eof = 0;
105 *start = &buf[offset];
106
107 DRM_PROC_PRINT(" total counts "
108 " | outstanding \n");
109 DRM_PROC_PRINT("type alloc freed fail bytes freed"
110 " | allocs bytes\n\n");
111 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
112 "system", 0, 0, 0,
113 drm_ram_available << (PAGE_SHIFT - 10));
114 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
115 "locked", 0, 0, 0, drm_ram_used >> 10);
116 DRM_PROC_PRINT("\n");
117 for (pt = drm_mem_stats; pt->name; pt++) {
118 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
119 pt->name,
120 pt->succeed_count,
121 pt->free_count,
122 pt->fail_count,
123 pt->bytes_allocated,
124 pt->bytes_freed,
125 pt->succeed_count - pt->free_count,
126 (long)pt->bytes_allocated
127 - (long)pt->bytes_freed);
128 }
129
130 if (len > request + offset)
131 return request;
132 *eof = 1;
133 return len - offset;
134}
135
136int drm_mem_info (char *buf, char **start, off_t offset,
137 int len, int *eof, void *data) {
138 int ret;
139
140 spin_lock(&drm_mem_lock);
141 ret = drm__mem_info (buf, start, offset, len, eof, data);
142 spin_unlock(&drm_mem_lock);
143 return ret;
144}
145
146void *drm_alloc (size_t size, int area) {
147 void *pt;
148
149 if (!size) {
150 DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
151 return NULL;
152 }
153
154 if (!(pt = kmalloc(size, GFP_KERNEL))) {
155 spin_lock(&drm_mem_lock);
156 ++drm_mem_stats[area].fail_count;
157 spin_unlock(&drm_mem_lock);
158 return NULL;
159 }
160 spin_lock(&drm_mem_lock);
161 ++drm_mem_stats[area].succeed_count;
162 drm_mem_stats[area].bytes_allocated += size;
163 spin_unlock(&drm_mem_lock);
164 return pt;
165}
166
167void *drm_calloc (size_t nmemb, size_t size, int area) {
168 void *addr;
169
170 addr = drm_alloc (nmemb * size, area);
171 if (addr != NULL)
172 memset((void *)addr, 0, size * nmemb);
173
174 return addr;
175}
176
177void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
178 void *pt;
179
180 if (!(pt = drm_alloc (size, area)))
181 return NULL;
182 if (oldpt && oldsize) {
183 memcpy(pt, oldpt, oldsize);
184 drm_free (oldpt, oldsize, area);
185 }
186 return pt;
187}
188
189void drm_free (void *pt, size_t size, int area) {
190 int alloc_count;
191 int free_count;
192
193 if (!pt)
194 DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
195 else
196 kfree(pt);
197 spin_lock(&drm_mem_lock);
198 drm_mem_stats[area].bytes_freed += size;
199 free_count = ++drm_mem_stats[area].free_count;
200 alloc_count = drm_mem_stats[area].succeed_count;
201 spin_unlock(&drm_mem_lock);
202 if (free_count > alloc_count) {
203 DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
204 free_count, alloc_count);
205 }
206}
207
208#if __OS_HAS_AGP
209
210DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
211 DRM_AGP_MEM *handle;
212
213 if (!pages) {
214 DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
215 return NULL;
216 }
217
218 if ((handle = drm_agp_allocate_memory (pages, type))) {
219 spin_lock(&drm_mem_lock);
220 ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
221 drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
222 += pages << PAGE_SHIFT;
223 spin_unlock(&drm_mem_lock);
224 return handle;
225 }
226 spin_lock(&drm_mem_lock);
227 ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
228 spin_unlock(&drm_mem_lock);
229 return NULL;
230}
231
232int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
233 int alloc_count;
234 int free_count;
235 int retval = -EINVAL;
236
237 if (!handle) {
238 DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
239 "Attempt to free NULL AGP handle\n");
240 return retval;
241 }
242
243 if (drm_agp_free_memory (handle)) {
244 spin_lock(&drm_mem_lock);
245 free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
246 alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
247 drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
248 += pages << PAGE_SHIFT;
249 spin_unlock(&drm_mem_lock);
250 if (free_count > alloc_count) {
251 DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
252 "Excess frees: %d frees, %d allocs\n",
253 free_count, alloc_count);
254 }
255 return 0;
256 }
257 return retval;
258}
259
260int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
261 int retcode = -EINVAL;
262
263 if (!handle) {
264 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
265 "Attempt to bind NULL AGP handle\n");
266 return retcode;
267 }
268
269 if (!(retcode = drm_agp_bind_memory (handle, start))) {
270 spin_lock(&drm_mem_lock);
271 ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
272 drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
273 += handle->page_count << PAGE_SHIFT;
274 spin_unlock(&drm_mem_lock);
275 return retcode;
276 }
277 spin_lock(&drm_mem_lock);
278 ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
279 spin_unlock(&drm_mem_lock);
280 return retcode;
281}
282
283int drm_unbind_agp (DRM_AGP_MEM * handle) {
284 int alloc_count;
285 int free_count;
286 int retcode = -EINVAL;
287
288 if (!handle) {
289 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
290 "Attempt to unbind NULL AGP handle\n");
291 return retcode;
292 }
293
294 if ((retcode = drm_agp_unbind_memory (handle)))
295 return retcode;
296 spin_lock(&drm_mem_lock);
297 free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
298 alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
299 drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
300 += handle->page_count << PAGE_SHIFT;
301 spin_unlock(&drm_mem_lock);
302 if (free_count > alloc_count) {
303 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
304 "Excess frees: %d frees, %d allocs\n",
305 free_count, alloc_count);
306 }
307 return retcode;
308}
309#endif
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
deleted file mode 100644
index 8dbd2572b7c3..000000000000
--- a/drivers/char/drm/drm_os_linux.h
+++ /dev/null
@@ -1,108 +0,0 @@
1/**
2 * \file drm_os_linux.h
3 * OS abstraction macros.
4 */
5
6#include <linux/interrupt.h> /* For task queue support */
7#include <linux/delay.h>
8
9/** Current process ID */
10#define DRM_CURRENTPID task_pid_nr(current)
11#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
12#define DRM_UDELAY(d) udelay(d)
13/** Read a byte from a MMIO region */
14#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
15/** Read a word from a MMIO region */
16#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
17/** Read a dword from a MMIO region */
18#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
19/** Write a byte into a MMIO region */
20#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
21/** Write a word into a MMIO region */
22#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
23/** Write a dword into a MMIO region */
24#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
25/** Read memory barrier */
26#define DRM_READMEMORYBARRIER() rmb()
27/** Write memory barrier */
28#define DRM_WRITEMEMORYBARRIER() wmb()
29/** Read/write memory barrier */
30#define DRM_MEMORYBARRIER() mb()
31
32/** IRQ handler arguments and return type and values */
33#define DRM_IRQ_ARGS int irq, void *arg
34
35/** AGP types */
36#if __OS_HAS_AGP
37#define DRM_AGP_MEM struct agp_memory
38#define DRM_AGP_KERN struct agp_kern_info
39#else
 40/* define some dummy types for kernels without AGP support */
41struct no_agp_kern {
42 unsigned long aper_base;
43 unsigned long aper_size;
44};
45#define DRM_AGP_MEM int
46#define DRM_AGP_KERN struct no_agp_kern
47#endif
48
49#if !(__OS_HAS_MTRR)
50static __inline__ int mtrr_add(unsigned long base, unsigned long size,
51 unsigned int type, char increment)
52{
53 return -ENODEV;
54}
55
56static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
57{
58 return -ENODEV;
59}
60
61#define MTRR_TYPE_WRCOMB 1
62
63#endif
64
65/** Other copying of data to kernel space */
66#define DRM_COPY_FROM_USER(arg1, arg2, arg3) \
67 copy_from_user(arg1, arg2, arg3)
68/** Other copying of data from kernel space */
69#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
70 copy_to_user(arg1, arg2, arg3)
 71/* Macros for copying from user space, checking readability only once */
72#define DRM_VERIFYAREA_READ( uaddr, size ) \
73 (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
74#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
75 __copy_from_user(arg1, arg2, arg3)
76#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
77 __copy_to_user(arg1, arg2, arg3)
78#define DRM_GET_USER_UNCHECKED(val, uaddr) \
79 __get_user(val, uaddr)
80
81#define DRM_HZ HZ
82
83#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
84do { \
85 DECLARE_WAITQUEUE(entry, current); \
86 unsigned long end = jiffies + (timeout); \
87 add_wait_queue(&(queue), &entry); \
88 \
89 for (;;) { \
90 __set_current_state(TASK_INTERRUPTIBLE); \
91 if (condition) \
92 break; \
93 if (time_after_eq(jiffies, end)) { \
94 ret = -EBUSY; \
95 break; \
96 } \
97 schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
98 if (signal_pending(current)) { \
99 ret = -EINTR; \
100 break; \
101 } \
102 } \
103 __set_current_state(TASK_RUNNING); \
104 remove_wait_queue(&(queue), &entry); \
105} while (0)
106
107#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
108#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
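As a hedged illustration of DRM_WAIT_ON (the queue and counter below are placeholders; the 1<<23 wrap guard mirrors the usual vblank-count comparison in DRM drivers):

/* Sketch: sleep up to 3 s for an IRQ-maintained counter to reach a target. */
static int example_wait_counter(wait_queue_head_t *queue, atomic_t *counter,
				unsigned int target)
{
	int ret = 0;

	DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
		    (atomic_read(counter) - target) <= (1 << 23));
	return ret;
}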
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
deleted file mode 100644
index 135bd19499fc..000000000000
--- a/drivers/char/drm/drm_pciids.h
+++ /dev/null
@@ -1,415 +0,0 @@
1/*
2 This file is auto-generated from the drm_pciids.txt in the DRM CVS
3 Please contact dri-devel@lists.sf.net to add new cards to this list
4*/
5#define radeon_PCI_IDS \
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
10 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
11 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
12 {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
13 {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
14 {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
15 {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
16 {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
17 {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
18 {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
19 {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
20 {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
21 {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
22 {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
23 {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
24 {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
25 {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
26 {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
27 {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
28 {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
29 {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
30 {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
31 {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
32 {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
33 {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
34 {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
35 {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
36 {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
37 {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
38 {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
39 {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
40 {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
41 {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
42 {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
43 {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
44 {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
45 {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
46 {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
47 {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
48 {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
49 {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
50 {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
51 {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
52 {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
53 {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
54 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
55 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
56 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
57 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
58 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
59 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
60 {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
61 {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
62 {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
63 {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
64 {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
65 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
66 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
67 {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
68 {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
69 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
70 {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
71 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
72 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
73 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
74 {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
75 {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
76 {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
77 {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
78 {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
79 {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
80 {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
81 {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
82 {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
83 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
86 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
87 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
88 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
89 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
90 {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
91 {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
92 {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
93 {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
94 {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
95 {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
96 {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
97 {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
98 {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
99 {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
100 {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
101 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
102 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
103 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
104 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
105 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
106 {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
107 {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
108 {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
109 {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
110 {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
111 {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
112 {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
113 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
114 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
115 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
116 {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
117 {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
118 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
119 {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
120 {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
121 {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
122 {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
123 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
124 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
125 {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
126 {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
127 {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
128 {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
129 {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
130 {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
131 {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
132 {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
133 {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
134 {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
135 {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
136 {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
137 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
138 {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
139 {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
140 {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
141 {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
142 {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
143 {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
144 {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
145 {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
146 {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
147 {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
148 {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
149 {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
150 {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
151 {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
152 {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
153 {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
154 {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
155 {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
156 {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
157 {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
158 {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
159 {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
160 {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
161 {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
162 {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
163 {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
164 {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
165 {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
166 {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
167 {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
168 {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
169 {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
170 {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
171 {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
172 {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
173 {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
174 {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
176 {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
177 {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
178 {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
179 {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
180 {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
181 {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
182 {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
183 {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
184 {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
185 {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
186 {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
187 {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
188 {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
189 {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
190 {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
191 {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
192 {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
193 {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
194 {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
195 {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
196 {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
197 {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
198 {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
199 {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
200 {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
201 {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
202 {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
203 {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
204 {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
205 {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
206 {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
207 {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
208 {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
209 {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
210 {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
211 {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
212 {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
213 {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
214 {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
215 {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
216 {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
217 {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
218 {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
219 {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
220 {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
221 {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
222 {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
223 {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
224 {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
225 {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
226 {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
227 {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
228 {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
229 {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
230 {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
231 {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
232 {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
233 {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
234 {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
235 {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
236 {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
237 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
238 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
239 {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
240 {0, 0, 0}
241
242#define r128_PCI_IDS \
243 {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
244 {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
245 {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
246 {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
247 {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
248 {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
249 {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
250 {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
251 {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
252 {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
253 {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
254 {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
255 {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
256 {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
257 {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
258 {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
259 {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
260 {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
261 {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
262 {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
263 {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
264 {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
265 {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
266 {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
267 {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
268 {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
269 {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
270 {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
271 {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
272 {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
273 {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
274 {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
275 {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
276 {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
277 {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
278 {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
279 {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
280 {0, 0, 0}
281
282#define mga_PCI_IDS \
283 {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
284 {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
285 {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
286 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
287 {0, 0, 0}
288
289#define mach64_PCI_IDS \
290 {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
291 {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
292 {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
293 {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
294 {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
295 {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
296 {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
297 {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
298 {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
299 {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
300 {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
301 {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
302 {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
303 {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
304 {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
305 {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
306 {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
307 {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
308 {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
309 {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
310 {0, 0, 0}
311
312#define sisdrv_PCI_IDS \
313 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
314 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
315 {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
316 {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
317 {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
318 {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
319 {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
320 {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
321 {0, 0, 0}
322
323#define tdfx_PCI_IDS \
324 {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
325 {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
326 {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
327 {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
328 {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
329 {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
330 {0, 0, 0}
331
332#define viadrv_PCI_IDS \
333 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
334 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
335 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
336 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
337 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
338 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
339 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
340 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
341 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
342 {0, 0, 0}
343
344#define i810_PCI_IDS \
345 {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
346 {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
347 {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
348 {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
349 {0, 0, 0}
350
351#define i830_PCI_IDS \
352 {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
353 {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
354 {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
355 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
356 {0, 0, 0}
357
358#define gamma_PCI_IDS \
359 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
360 {0, 0, 0}
361
362#define savage_PCI_IDS \
363 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
364 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
365 {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
366 {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
367 {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
368 {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
369 {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
370 {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
371 {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
372 {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
373 {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
374 {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
375 {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
376 {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
377 {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
378 {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
379 {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
380 {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
381 {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
382 {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
383 {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
384 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
385 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
386 {0, 0, 0}
387
388#define ffb_PCI_IDS \
389 {0, 0, 0}
390
391#define i915_PCI_IDS \
392 {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
393 {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
394 {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
395 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
396 {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
397 {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
398 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
399 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
400 {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
401 {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
402 {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
403 {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
404 {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
405 {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
406 {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
407 {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
408 {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
409 {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
410 {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
411 {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
412 {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
413 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
414 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
415 {0, 0, 0}
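
The tables above are only macro bodies; a DRM driver of this period typically expands one of them into its PCI probe table. A minimal sketch, assuming the header is on the include path and using an illustrative table name (pciidlist):

#include <linux/pci.h>
#include "drm_pciids.h"

/* Each entry expands to {vendor, device, subvendor, subdevice,
 * class, class_mask, driver_data}, matching struct pci_device_id. */
static struct pci_device_id pciidlist[] = {
	i915_PCI_IDS
};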
diff --git a/drivers/char/drm/drm_sarea.h b/drivers/char/drm/drm_sarea.h
deleted file mode 100644
index 480037331e4e..000000000000
--- a/drivers/char/drm/drm_sarea.h
+++ /dev/null
@@ -1,84 +0,0 @@
1/**
2 * \file drm_sarea.h
3 * \brief SAREA definitions
4 *
5 * \author Michel Dänzer <michel@daenzer.net>
6 */
7
8/*
9 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
10 * All Rights Reserved.
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a
13 * copy of this software and associated documentation files (the "Software"),
14 * to deal in the Software without restriction, including without limitation
15 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
16 * and/or sell copies of the Software, and to permit persons to whom the
17 * Software is furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice (including the next
20 * paragraph) shall be included in all copies or substantial portions of the
21 * Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
27 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
28 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
29 * OTHER DEALINGS IN THE SOFTWARE.
30 */
31
32#ifndef _DRM_SAREA_H_
33#define _DRM_SAREA_H_
34
35#include "drm.h"
36
37/* SAREA area needs to be at least a page */
38#if defined(__alpha__)
39#define SAREA_MAX 0x2000
40#elif defined(__ia64__)
41#define SAREA_MAX 0x10000 /* 64kB */
42#else
43/* Intel 830M driver needs at least 8k SAREA */
44#define SAREA_MAX 0x2000
45#endif
46
47/** Maximum number of drawables in the SAREA */
48#define SAREA_MAX_DRAWABLES 256
49
50#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
51
52/** SAREA drawable */
53struct drm_sarea_drawable {
54 unsigned int stamp;
55 unsigned int flags;
56};
57
58/** SAREA frame */
59struct drm_sarea_frame {
60 unsigned int x;
61 unsigned int y;
62 unsigned int width;
63 unsigned int height;
64 unsigned int fullscreen;
65};
66
67/** SAREA */
68struct drm_sarea {
69 /** first thing is always the DRM locking structure */
70 struct drm_hw_lock lock;
71 /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
72 struct drm_hw_lock drawable_lock;
73 struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
74 struct drm_sarea_frame frame; /**< frame */
75 drm_context_t dummy_context;
76};
77
78#ifndef __KERNEL__
79typedef struct drm_sarea_drawable drm_sarea_drawable_t;
80typedef struct drm_sarea_frame drm_sarea_frame_t;
81typedef struct drm_sarea drm_sarea_t;
82#endif
83
84#endif /* _DRM_SAREA_H_ */
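
The header relies on SAREA_MAX being large enough to hold struct drm_sarea plus any driver-private state placed after it. A minimal compile-time sketch of that assumption (not part of the original header), using the pre-C11 negative-array-size idiom:

#include "drm_sarea.h"

/* Fails to compile if struct drm_sarea ever outgrows SAREA_MAX. */
typedef char drm_sarea_fits_within_sarea_max[
	(sizeof(struct drm_sarea) <= SAREA_MAX) ? 1 : -1];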
diff --git a/drivers/char/drm/drm_sman.h b/drivers/char/drm/drm_sman.h
deleted file mode 100644
index 08ecf83ad5d4..000000000000
--- a/drivers/char/drm/drm_sman.h
+++ /dev/null
@@ -1,176 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Simple memory MANager interface that keeps track of allocated regions on a
30 * per "owner" basis. All regions associated with an "owner" can be released
31 * with a single call, typically when the "owner" ceases to exist. The owner is
32 * any "unsigned long" identifier, typically a pointer to a file private
33 * struct or a context identifier.
34 *
35 * Authors:
36 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37 */
38
39#ifndef DRM_SMAN_H
40#define DRM_SMAN_H
41
42#include "drmP.h"
43#include "drm_hashtab.h"
44
45/*
46 * A class that is an abstraction of a simple memory allocator.
47 * The sman implementation provides a default such allocator
48 * using the drm_mm.c implementation. But the user can replace it.
49 * See the SiS implementation, which may use the SiS FB kernel module
50 * for memory management.
51 */
52
53struct drm_sman_mm {
54 /* private info. If allocated, needs to be destroyed by the destroy
55 function */
56 void *private;
57
58 /* Allocate a memory block with given size and alignment.
59 Return an opaque reference to the memory block */
60
61 void *(*allocate) (void *private, unsigned long size,
62 unsigned alignment);
63
64 /* Free a memory block. "ref" is the opaque reference that we got from
65 the "alloc" function */
66
67 void (*free) (void *private, void *ref);
68
69 /* Free all resources associated with this allocator */
70
71 void (*destroy) (void *private);
72
73 /* Return a memory offset from the opaque reference returned from the
74 "alloc" function */
75
76 unsigned long (*offset) (void *private, void *ref);
77};
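
The four callbacks above are all a replacement backend has to provide. A toy sketch of such a backend (purely illustrative, not the SiS implementation referenced above): it services allocations with kmalloc/kfree and reports the kernel virtual address as the "offset", which is only meaningful for a system-memory pool.

#include <linux/slab.h>

static void *toy_allocate(void *private, unsigned long size, unsigned alignment)
{
	/* alignment is ignored in this sketch */
	return kmalloc(size, GFP_KERNEL);
}

static void toy_free(void *private, void *ref)
{
	kfree(ref);
}

static void toy_destroy(void *private)
{
	/* no per-manager state to tear down in this sketch */
}

static unsigned long toy_offset(void *private, void *ref)
{
	return (unsigned long)ref;
}

static struct drm_sman_mm toy_sman_backend = {
	.allocate = toy_allocate,
	.free     = toy_free,
	.destroy  = toy_destroy,
	.offset   = toy_offset,
};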
78
79struct drm_memblock_item {
80 struct list_head owner_list;
81 struct drm_hash_item user_hash;
82 void *mm_info;
83 struct drm_sman_mm *mm;
84 struct drm_sman *sman;
85};
86
87struct drm_sman {
88 struct drm_sman_mm *mm;
89 int num_managers;
90 struct drm_open_hash owner_hash_tab;
91 struct drm_open_hash user_hash_tab;
92 struct list_head owner_items;
93};
94
95/*
96 * Take down a memory manager. This function should only be called after a
97 * successful init and after a call to drm_sman_cleanup.
98 */
99
100extern void drm_sman_takedown(struct drm_sman * sman);
101
102/*
103 * Allocate structures for a manager.
104 * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
105 * user_order is the log2 of the number of buckets in the user hash table.
106 * set this to approximately log2 of the max number of memory regions
107 * that will be allocated for _all_ pools together.
108 * owner_order is the log2 of the number of buckets in the owner hash table.
109 * set this to approximately log2 of
110 * the number of client file connections that will
111 * be using the manager.
112 *
113 */
114
115extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
116 unsigned int user_order, unsigned int owner_order);
117
118/*
119 * Initialize a drm_mm.c allocator. Should be called only once for each
120 * manager unless a customized allocator is used.
121 */
122
123extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
124 unsigned long start, unsigned long size);
125
126/*
127 * Initialize a customized allocator for one of the managers.
128 * (See the SiS module). The object pointed to by "allocator" is copied,
129 * so it can be destroyed after this call.
130 */
131
132extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
133 struct drm_sman_mm * allocator);
134
135/*
136 * Allocate a memory block. Alignment is not implemented yet.
137 */
138
139extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
140 unsigned int manager,
141 unsigned long size,
142 unsigned alignment,
143 unsigned long owner);
144/*
145 * Free a memory block identified by its user hash key.
146 */
147
148extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
149
150/*
151 * returns 1 iff there are no stale memory blocks associated with this owner.
152 * Typically called to determine if we need to idle the hardware and call
153 * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
154 * resources associated with owner.
155 */
156
157extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
158
159/*
160 * Frees all stale memory blocks associated with this owner. Note that this
161 * requires that the hardware is finished with all blocks, so the graphics engine
162 * should be idled before this call is made. This function also frees
163 * any resources associated with "owner" and should be called when owner
164 * is not going to be referenced anymore.
165 */
166
167extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
168
169/*
170 * Frees all stale memory blocks associated with the memory manager.
171 * See idling above.
172 */
173
174extern void drm_sman_cleanup(struct drm_sman * sman);
175
176#endif
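
Taken together, the comments above imply a fixed call order. A hedged usage sketch under that reading (error handling trimmed; the hash-table orders and sizes are illustrative guesses):

static int sman_usage_sketch(unsigned long owner)
{
	struct drm_sman sman;
	struct drm_memblock_item *item;
	int ret;

	ret = drm_sman_init(&sman, 1, 12, 8);	/* one pool; orders are guesses */
	if (ret)
		return ret;

	/* manager 0 covers a 64 MiB range starting at offset 0 */
	ret = drm_sman_set_range(&sman, 0, 0, 64 * 1024 * 1024);
	if (!ret) {
		/* alignment is documented above as not implemented, so pass 0 */
		item = drm_sman_alloc(&sman, 0, 4096, 0, owner);
		if (item)
			drm_sman_free_key(&sman, item->user_hash.key);

		/* release anything the owner still holds (hardware assumed idle) */
		if (!drm_sman_owner_clean(&sman, owner))
			drm_sman_owner_cleanup(&sman, owner);
	}

	drm_sman_cleanup(&sman);	/* cleanup must precede takedown */
	drm_sman_takedown(&sman);
	return ret;
}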
diff --git a/drivers/char/drm/i810_drm.h b/drivers/char/drm/i810_drm.h
deleted file mode 100644
index 7a10bb6f2c0f..000000000000
--- a/drivers/char/drm/i810_drm.h
+++ /dev/null
@@ -1,281 +0,0 @@
1#ifndef _I810_DRM_H_
2#define _I810_DRM_H_
3
4/* WARNING: These defines must be the same as what the Xserver uses.
5 * If you change them, you must change the defines in the Xserver.
6 */
7
8#ifndef _I810_DEFINES_
9#define _I810_DEFINES_
10
11#define I810_DMA_BUF_ORDER 12
12#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
13#define I810_DMA_BUF_NR 256
14#define I810_NR_SAREA_CLIPRECTS 8
15
16/* Each region is a minimum of 64k, and there are at most 64 of them.
17 */
18#define I810_NR_TEX_REGIONS 64
19#define I810_LOG_MIN_TEX_REGION_SIZE 16
20#endif
21
22#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
23#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
24#define I810_UPLOAD_CTX 0x4
25#define I810_UPLOAD_BUFFERS 0x8
26#define I810_UPLOAD_TEX0 0x10
27#define I810_UPLOAD_TEX1 0x20
28#define I810_UPLOAD_CLIPRECTS 0x40
29
30/* Indices into buf.Setup where various bits of state are mirrored per
31 * context and per buffer. These can be fired at the card as a unit,
32 * or in a piecewise fashion as required.
33 */
34
35/* Destbuffer state
36 * - backbuffer linear offset and pitch -- invariant in the current dri
37 * - zbuffer linear offset and pitch -- also invariant
38 * - drawing origin in back and depth buffers.
39 *
40 * Keep the depth/back buffer state here to accommodate private buffers
41 * in the future.
42 */
43#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
44#define I810_DESTREG_DI1 1
45#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
46#define I810_DESTREG_DV1 3
47#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
48#define I810_DESTREG_DR1 5
49#define I810_DESTREG_DR2 6
50#define I810_DESTREG_DR3 7
51#define I810_DESTREG_DR4 8
52#define I810_DEST_SETUP_SIZE 10
53
54/* Context state
55 */
56#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
57#define I810_CTXREG_CF1 1
58#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
59#define I810_CTXREG_ST1 3
60#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
61#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
62#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
63#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
64#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
65#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
66#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
67#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
68#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
69#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
70#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
71#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
72#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
73#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invariant! */
74#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
75#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
76#define I810_CTX_SETUP_SIZE 20
77
78/* Texture state (per tex unit)
79 */
80#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
81#define I810_TEXREG_MI1 1
82#define I810_TEXREG_MI2 2
83#define I810_TEXREG_MI3 3
84#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
85#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
86#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
87#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
88#define I810_TEX_SETUP_SIZE 8
89
90/* Flags for clear ioctl
91 */
92#define I810_FRONT 0x1
93#define I810_BACK 0x2
94#define I810_DEPTH 0x4
95
96typedef enum _drm_i810_init_func {
97 I810_INIT_DMA = 0x01,
98 I810_CLEANUP_DMA = 0x02,
99 I810_INIT_DMA_1_4 = 0x03
100} drm_i810_init_func_t;
101
102/* This is the init structure after v1.2 */
103typedef struct _drm_i810_init {
104 drm_i810_init_func_t func;
105 unsigned int mmio_offset;
106 unsigned int buffers_offset;
107 int sarea_priv_offset;
108 unsigned int ring_start;
109 unsigned int ring_end;
110 unsigned int ring_size;
111 unsigned int front_offset;
112 unsigned int back_offset;
113 unsigned int depth_offset;
114 unsigned int overlay_offset;
115 unsigned int overlay_physical;
116 unsigned int w;
117 unsigned int h;
118 unsigned int pitch;
119 unsigned int pitch_bits;
120} drm_i810_init_t;
121
122/* This is the init structure prior to v1.2 */
123typedef struct _drm_i810_pre12_init {
124 drm_i810_init_func_t func;
125 unsigned int mmio_offset;
126 unsigned int buffers_offset;
127 int sarea_priv_offset;
128 unsigned int ring_start;
129 unsigned int ring_end;
130 unsigned int ring_size;
131 unsigned int front_offset;
132 unsigned int back_offset;
133 unsigned int depth_offset;
134 unsigned int w;
135 unsigned int h;
136 unsigned int pitch;
137 unsigned int pitch_bits;
138} drm_i810_pre12_init_t;
139
140/* Warning: If you change the SAREA structure you must change the Xserver
141 * structure as well */
142
143typedef struct _drm_i810_tex_region {
144 unsigned char next, prev; /* indices to form a circular LRU */
145 unsigned char in_use; /* owned by a client, or free? */
146 int age; /* tracked by clients to update local LRU's */
147} drm_i810_tex_region_t;
148
149typedef struct _drm_i810_sarea {
150 unsigned int ContextState[I810_CTX_SETUP_SIZE];
151 unsigned int BufferState[I810_DEST_SETUP_SIZE];
152 unsigned int TexState[2][I810_TEX_SETUP_SIZE];
153 unsigned int dirty;
154
155 unsigned int nbox;
156 struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
157
158 /* Maintain an LRU of contiguous regions of texture space. If
159 * you think you own a region of texture memory, and it has an
160 * age different to the one you set, then you are mistaken and
161 * it has been stolen by another client. If global texAge
162 * hasn't changed, there is no need to walk the list.
163 *
164 * These regions can be used as a proxy for the fine-grained
165 * texture information of other clients - by maintaining them
166 * in the same lru which is used to age their own textures,
167 * clients have an approximate lru for the whole of global
168 * texture space, and can make informed decisions as to which
169 * areas to kick out. There is no need to choose whether to
170 * kick out your own texture or someone else's - simply eject
171 * them all in LRU order.
172 */
173
174 drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
175 /* Last elt is sentinel */
176 int texAge; /* last time texture was uploaded */
177 int last_enqueue; /* last time a buffer was enqueued */
178 int last_dispatch; /* age of the most recently dispatched buffer */
179 int last_quiescent; /* */
180 int ctxOwner; /* last context to upload state */
181
182 int vertex_prim;
183
184 int pf_enabled; /* is pageflipping allowed? */
185 int pf_active;
186 int pf_current_page; /* which buffer is being displayed? */
187} drm_i810_sarea_t;
188
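
The texture-region aging scheme described in the comment above reduces to one comparison per region. A tiny illustrative helper (not from the header); age_i_set is whatever value the client last stamped into that region:

static int i810_region_stolen(const drm_i810_sarea_t *sarea, int idx, int age_i_set)
{
	/* an age different from the one we set means another client took it */
	return sarea->texList[idx].age != age_i_set;
}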
189/* WARNING: If you change any of these defines, make sure to change the
190 * defines in the corresponding Xserver header as well.
191 */
192
193/* i810 specific ioctls
194 * The device specific ioctl range is 0x40 to 0x79.
195 */
196#define DRM_I810_INIT 0x00
197#define DRM_I810_VERTEX 0x01
198#define DRM_I810_CLEAR 0x02
199#define DRM_I810_FLUSH 0x03
200#define DRM_I810_GETAGE 0x04
201#define DRM_I810_GETBUF 0x05
202#define DRM_I810_SWAP 0x06
203#define DRM_I810_COPY 0x07
204#define DRM_I810_DOCOPY 0x08
205#define DRM_I810_OV0INFO 0x09
206#define DRM_I810_FSTATUS 0x0a
207#define DRM_I810_OV0FLIP 0x0b
208#define DRM_I810_MC 0x0c
209#define DRM_I810_RSTATUS 0x0d
210#define DRM_I810_FLIP 0x0e
211
212#define DRM_IOCTL_I810_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t)
213#define DRM_IOCTL_I810_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t)
214#define DRM_IOCTL_I810_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t)
215#define DRM_IOCTL_I810_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_I810_FLUSH)
216#define DRM_IOCTL_I810_GETAGE DRM_IO( DRM_COMMAND_BASE + DRM_I810_GETAGE)
217#define DRM_IOCTL_I810_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t)
218#define DRM_IOCTL_I810_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_I810_SWAP)
219#define DRM_IOCTL_I810_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t)
220#define DRM_IOCTL_I810_DOCOPY DRM_IO( DRM_COMMAND_BASE + DRM_I810_DOCOPY)
221#define DRM_IOCTL_I810_OV0INFO DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t)
222#define DRM_IOCTL_I810_FSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS)
223#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP)
224#define DRM_IOCTL_I810_MC DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t)
225#define DRM_IOCTL_I810_RSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS)
226#define DRM_IOCTL_I810_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP)
227
228typedef struct _drm_i810_clear {
229 int clear_color;
230 int clear_depth;
231 int flags;
232} drm_i810_clear_t;
233
234/* These may be placeholders if we have more cliprects than
235 * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
236 * false, indicating that the buffer will be dispatched again with a
237 * new set of cliprects.
238 */
239typedef struct _drm_i810_vertex {
240 int idx; /* buffer index */
241 int used; /* nr bytes in use */
242 int discard; /* client is finished with the buffer? */
243} drm_i810_vertex_t;
244
245typedef struct _drm_i810_copy_t {
246 int idx; /* buffer index */
247 int used; /* nr bytes in use */
248 void *address; /* Address to copy from */
249} drm_i810_copy_t;
250
251#define PR_TRIANGLES (0x0<<18)
252#define PR_TRISTRIP_0 (0x1<<18)
253#define PR_TRISTRIP_1 (0x2<<18)
254#define PR_TRIFAN (0x3<<18)
255#define PR_POLYGON (0x4<<18)
256#define PR_LINES (0x5<<18)
257#define PR_LINESTRIP (0x6<<18)
258#define PR_RECTS (0x7<<18)
259#define PR_MASK (0x7<<18)
260
261typedef struct drm_i810_dma {
262 void *virtual;
263 int request_idx;
264 int request_size;
265 int granted;
266} drm_i810_dma_t;
267
268typedef struct _drm_i810_overlay_t {
269 unsigned int offset; /* Address of the Overlay Regs */
270 unsigned int physical;
271} drm_i810_overlay_t;
272
273typedef struct _drm_i810_mc {
274 int idx; /* buffer index */
275 int used; /* nr bytes in use */
276 int num_blocks; /* number of GFXBlocks */
277 int *length; /* List of lengths for GFXBlocks (FUTURE) */
278 unsigned int last_render; /* Last Render Request */
279} drm_i810_mc_t;
280
281#endif /* _I810_DRM_H_ */
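
A hedged userspace sketch of driving one of the ioctls above: submitting a filled vertex buffer. The file descriptor, buffer index and byte count are assumed to come from earlier DRM setup (the index from DRM_IOCTL_I810_GETBUF), and drm.h is assumed to be on the include path for DRM_IOW/DRM_COMMAND_BASE.

#include <sys/ioctl.h>
#include "drm.h"
#include "i810_drm.h"

static int i810_submit_vertices(int fd, int idx, int nbytes)
{
	drm_i810_vertex_t v = {
		.idx     = idx,		/* DMA buffer index from GETBUF */
		.used    = nbytes,	/* bytes actually filled in */
		.discard = 1,		/* client is finished with the buffer */
	};

	return ioctl(fd, DRM_IOCTL_I810_VERTEX, &v);
}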
diff --git a/drivers/char/drm/i830_drm.h b/drivers/char/drm/i830_drm.h
deleted file mode 100644
index 4b00d2dd4f68..000000000000
--- a/drivers/char/drm/i830_drm.h
+++ /dev/null
@@ -1,342 +0,0 @@
1#ifndef _I830_DRM_H_
2#define _I830_DRM_H_
3
4/* WARNING: These defines must be the same as what the Xserver uses.
5 * If you change them, you must change the defines in the Xserver.
6 *
7 * KW: Actually, you can't ever change them because doing so would
8 * break backwards compatibility.
9 */
10
11#ifndef _I830_DEFINES_
12#define _I830_DEFINES_
13
14#define I830_DMA_BUF_ORDER 12
15#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
16#define I830_DMA_BUF_NR 256
17#define I830_NR_SAREA_CLIPRECTS 8
18
19/* Each region is a minimum of 64k, and there are at most 64 of them.
20 */
21#define I830_NR_TEX_REGIONS 64
22#define I830_LOG_MIN_TEX_REGION_SIZE 16
23
24/* KW: These aren't correct but someone set them to two and then
25 * released the module. Now we can't change them as doing so would
26 * break backwards compatibility.
27 */
28#define I830_TEXTURE_COUNT 2
29#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
30
31#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
32
33#define I830_UPLOAD_CTX 0x1
34#define I830_UPLOAD_BUFFERS 0x2
35#define I830_UPLOAD_CLIPRECTS 0x4
36#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
37#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
38#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
39#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
40#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
41#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
42#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
43#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
44#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
45#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
46#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
47#define I830_UPLOAD_TEX0 0x10000
48#define I830_UPLOAD_TEX1 0x20000
49#define I830_UPLOAD_TEX2 0x40000
50#define I830_UPLOAD_TEX3 0x80000
51#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
52#define I830_UPLOAD_TEX_MASK 0xf0000
53#define I830_UPLOAD_TEXBLEND0 0x100000
54#define I830_UPLOAD_TEXBLEND1 0x200000
55#define I830_UPLOAD_TEXBLEND2 0x400000
56#define I830_UPLOAD_TEXBLEND3 0x800000
57#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
58#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
59#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
60#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
61#define I830_UPLOAD_STIPPLE 0x8000000
62
63/* Indices into buf.Setup where various bits of state are mirrored per
64 * context and per buffer. These can be fired at the card as a unit,
65 * or in a piecewise fashion as required.
66 */
67
68/* Destbuffer state
69 * - backbuffer linear offset and pitch -- invariant in the current dri
70 * - zbuffer linear offset and pitch -- also invariant
71 * - drawing origin in back and depth buffers.
72 *
73 * Keep the depth/back buffer state here to accommodate private buffers
74 * in the future.
75 */
76
77#define I830_DESTREG_CBUFADDR 0
78#define I830_DESTREG_DBUFADDR 1
79#define I830_DESTREG_DV0 2
80#define I830_DESTREG_DV1 3
81#define I830_DESTREG_SENABLE 4
82#define I830_DESTREG_SR0 5
83#define I830_DESTREG_SR1 6
84#define I830_DESTREG_SR2 7
85#define I830_DESTREG_DR0 8
86#define I830_DESTREG_DR1 9
87#define I830_DESTREG_DR2 10
88#define I830_DESTREG_DR3 11
89#define I830_DESTREG_DR4 12
90#define I830_DEST_SETUP_SIZE 13
91
92/* Context state
93 */
94#define I830_CTXREG_STATE1 0
95#define I830_CTXREG_STATE2 1
96#define I830_CTXREG_STATE3 2
97#define I830_CTXREG_STATE4 3
98#define I830_CTXREG_STATE5 4
99#define I830_CTXREG_IALPHAB 5
100#define I830_CTXREG_STENCILTST 6
101#define I830_CTXREG_ENABLES_1 7
102#define I830_CTXREG_ENABLES_2 8
103#define I830_CTXREG_AA 9
104#define I830_CTXREG_FOGCOLOR 10
105#define I830_CTXREG_BLENDCOLR0 11
106#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
107#define I830_CTXREG_VF 13
108#define I830_CTXREG_VF2 14
109#define I830_CTXREG_MCSB0 15
110#define I830_CTXREG_MCSB1 16
111#define I830_CTX_SETUP_SIZE 17
112
113/* 1.3: Stipple state
114 */
115#define I830_STPREG_ST0 0
116#define I830_STPREG_ST1 1
117#define I830_STP_SETUP_SIZE 2
118
119/* Texture state (per tex unit)
120 */
121
122#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
123#define I830_TEXREG_MI1 1
124#define I830_TEXREG_MI2 2
125#define I830_TEXREG_MI3 3
126#define I830_TEXREG_MI4 4
127#define I830_TEXREG_MI5 5
128#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
129#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
130#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
131#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
132#define I830_TEX_SETUP_SIZE 10
133
134#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
135#define I830_TEXREG_TM0S0 1
136#define I830_TEXREG_TM0S1 2
137#define I830_TEXREG_TM0S2 3
138#define I830_TEXREG_TM0S3 4
139#define I830_TEXREG_TM0S4 5
140#define I830_TEXREG_NOP0 6 /* noop */
141#define I830_TEXREG_NOP1 7 /* noop */
142#define I830_TEXREG_NOP2 8 /* noop */
143#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
144#define __I830_TEX_SETUP_SIZE 10
145
146#define I830_FRONT 0x1
147#define I830_BACK 0x2
148#define I830_DEPTH 0x4
149
150#endif /* _I830_DEFINES_ */
151
152typedef struct _drm_i830_init {
153 enum {
154 I830_INIT_DMA = 0x01,
155 I830_CLEANUP_DMA = 0x02
156 } func;
157 unsigned int mmio_offset;
158 unsigned int buffers_offset;
159 int sarea_priv_offset;
160 unsigned int ring_start;
161 unsigned int ring_end;
162 unsigned int ring_size;
163 unsigned int front_offset;
164 unsigned int back_offset;
165 unsigned int depth_offset;
166 unsigned int w;
167 unsigned int h;
168 unsigned int pitch;
169 unsigned int pitch_bits;
170 unsigned int back_pitch;
171 unsigned int depth_pitch;
172 unsigned int cpp;
173} drm_i830_init_t;
174
175/* Warning: If you change the SAREA structure you must change the Xserver
176 * structure as well */
177
178typedef struct _drm_i830_tex_region {
179 unsigned char next, prev; /* indices to form a circular LRU */
180 unsigned char in_use; /* owned by a client, or free? */
181 int age; /* tracked by clients to update local LRU's */
182} drm_i830_tex_region_t;
183
184typedef struct _drm_i830_sarea {
185 unsigned int ContextState[I830_CTX_SETUP_SIZE];
186 unsigned int BufferState[I830_DEST_SETUP_SIZE];
187 unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
188 unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
189 unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
190 unsigned int Palette[2][256];
191 unsigned int dirty;
192
193 unsigned int nbox;
194 struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
195
196 /* Maintain an LRU of contiguous regions of texture space. If
197 * you think you own a region of texture memory, and it has an
198 * age different to the one you set, then you are mistaken and
199 * it has been stolen by another client. If global texAge
200 * hasn't changed, there is no need to walk the list.
201 *
202 * These regions can be used as a proxy for the fine-grained
203 * texture information of other clients - by maintaining them
204 * in the same lru which is used to age their own textures,
205 * clients have an approximate lru for the whole of global
206 * texture space, and can make informed decisions as to which
207 * areas to kick out. There is no need to choose whether to
208 * kick out your own texture or someone else's - simply eject
209 * them all in LRU order.
210 */
211
212 drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
213 /* Last elt is sentinel */
214 int texAge; /* last time texture was uploaded */
215 int last_enqueue; /* last time a buffer was enqueued */
216 int last_dispatch; /* age of the most recently dispatched buffer */
217 int last_quiescent; /* */
218 int ctxOwner; /* last context to upload state */
219
220 int vertex_prim;
221
222 int pf_enabled; /* is pageflipping allowed? */
223 int pf_active;
224 int pf_current_page; /* which buffer is being displayed? */
225
226 int perf_boxes; /* performance boxes to be displayed */
227
228 /* Here's the state for texunits 2,3:
229 */
230 unsigned int TexState2[I830_TEX_SETUP_SIZE];
231 unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
232 unsigned int TexBlendStateWordsUsed2;
233
234 unsigned int TexState3[I830_TEX_SETUP_SIZE];
235 unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
236 unsigned int TexBlendStateWordsUsed3;
237
238 unsigned int StippleState[I830_STP_SETUP_SIZE];
239} drm_i830_sarea_t;
240
241/* Flags for perf_boxes
242 */
243#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
244#define I830_BOX_FLIP 0x2 /* populated by kernel */
245#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
246#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
247#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
248
249/* I830 specific ioctls
250 * The device specific ioctl range is 0x40 to 0x79.
251 */
252#define DRM_I830_INIT 0x00
253#define DRM_I830_VERTEX 0x01
254#define DRM_I830_CLEAR 0x02
255#define DRM_I830_FLUSH 0x03
256#define DRM_I830_GETAGE 0x04
257#define DRM_I830_GETBUF 0x05
258#define DRM_I830_SWAP 0x06
259#define DRM_I830_COPY 0x07
260#define DRM_I830_DOCOPY 0x08
261#define DRM_I830_FLIP 0x09
262#define DRM_I830_IRQ_EMIT 0x0a
263#define DRM_I830_IRQ_WAIT 0x0b
264#define DRM_I830_GETPARAM 0x0c
265#define DRM_I830_SETPARAM 0x0d
266
267#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
268#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
269#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
270#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
271#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
272#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
273#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
274#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
275#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
276#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
277#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
278#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
279#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
280#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
281
282typedef struct _drm_i830_clear {
283 int clear_color;
284 int clear_depth;
285 int flags;
286 unsigned int clear_colormask;
287 unsigned int clear_depthmask;
288} drm_i830_clear_t;
289
290/* These may be placeholders if we have more cliprects than
291 * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
292 * false, indicating that the buffer will be dispatched again with a
293 * new set of cliprects.
294 */
295typedef struct _drm_i830_vertex {
296 int idx; /* buffer index */
297 int used; /* nr bytes in use */
298 int discard; /* client is finished with the buffer? */
299} drm_i830_vertex_t;
300
301typedef struct _drm_i830_copy_t {
302 int idx; /* buffer index */
303 int used; /* nr bytes in use */
304 void __user *address; /* Address to copy from */
305} drm_i830_copy_t;
306
307typedef struct drm_i830_dma {
308 void __user *virtual;
309 int request_idx;
310 int request_size;
311 int granted;
312} drm_i830_dma_t;
313
314/* 1.3: Userspace can request & wait on irq's:
315 */
316typedef struct drm_i830_irq_emit {
317 int __user *irq_seq;
318} drm_i830_irq_emit_t;
319
320typedef struct drm_i830_irq_wait {
321 int irq_seq;
322} drm_i830_irq_wait_t;
323
324/* 1.3: New ioctl to query kernel params:
325 */
326#define I830_PARAM_IRQ_ACTIVE 1
327
328typedef struct drm_i830_getparam {
329 int param;
330 int __user *value;
331} drm_i830_getparam_t;
332
333/* 1.3: New ioctl to set kernel params:
334 */
335#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
336
337typedef struct drm_i830_setparam {
338 int param;
339 int value;
340} drm_i830_setparam_t;
341
342#endif /* _I830_DRM_H_ */
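
The generic I830_UPLOAD_*_N(n) macros above are just shifted forms of the per-unit constants. An illustrative helper (sarea is an assumed pointer to the mapped drm_i830_sarea_t) marking texture unit 2 dirty, with the expansions noted:

static void i830_mark_texunit2_dirty(drm_i830_sarea_t *sarea)
{
	sarea->dirty |= I830_UPLOAD_TEX_N(2)		/* == I830_UPLOAD_TEX2       (0x40000)  */
		     |  I830_UPLOAD_TEX_N_IMAGE(2)	/* == I830_UPLOAD_TEX2_IMAGE (0x1000)   */
		     |  I830_UPLOAD_TEXBLEND_N(2);	/* == I830_UPLOAD_TEXBLEND2  (0x400000) */
}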
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
deleted file mode 100644
index 05c66cf03a9e..000000000000
--- a/drivers/char/drm/i915_drm.h
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef _I915_DRM_H_
28#define _I915_DRM_H_
29
30/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints.
32 */
33
34#include "drm.h"
35
36/* Each region is a minimum of 16k, and there are at most 255 of them.
37 */
38#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
39 * of chars for next/prev indices */
40#define I915_LOG_MIN_TEX_REGION_SIZE 14
41
42typedef struct _drm_i915_init {
43 enum {
44 I915_INIT_DMA = 0x01,
45 I915_CLEANUP_DMA = 0x02,
46 I915_RESUME_DMA = 0x03
47 } func;
48 unsigned int mmio_offset;
49 int sarea_priv_offset;
50 unsigned int ring_start;
51 unsigned int ring_end;
52 unsigned int ring_size;
53 unsigned int front_offset;
54 unsigned int back_offset;
55 unsigned int depth_offset;
56 unsigned int w;
57 unsigned int h;
58 unsigned int pitch;
59 unsigned int pitch_bits;
60 unsigned int back_pitch;
61 unsigned int depth_pitch;
62 unsigned int cpp;
63 unsigned int chipset;
64} drm_i915_init_t;
65
66typedef struct _drm_i915_sarea {
67 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
68 int last_upload; /* last time texture was uploaded */
69 int last_enqueue; /* last time a buffer was enqueued */
70 int last_dispatch; /* age of the most recently dispatched buffer */
71 int ctxOwner; /* last context to upload state */
72 int texAge;
73 int pf_enabled; /* is pageflipping allowed? */
74 int pf_active;
75 int pf_current_page; /* which buffer is being displayed? */
76 int perf_boxes; /* performance boxes to be displayed */
77 int width, height; /* screen size in pixels */
78
79 drm_handle_t front_handle;
80 int front_offset;
81 int front_size;
82
83 drm_handle_t back_handle;
84 int back_offset;
85 int back_size;
86
87 drm_handle_t depth_handle;
88 int depth_offset;
89 int depth_size;
90
91 drm_handle_t tex_handle;
92 int tex_offset;
93 int tex_size;
94 int log_tex_granularity;
95 int pitch;
96 int rotation; /* 0, 90, 180 or 270 */
97 int rotated_offset;
98 int rotated_size;
99 int rotated_pitch;
100 int virtualX, virtualY;
101
102 unsigned int front_tiled;
103 unsigned int back_tiled;
104 unsigned int depth_tiled;
105 unsigned int rotated_tiled;
106 unsigned int rotated2_tiled;
107
108 int pipeA_x;
109 int pipeA_y;
110 int pipeA_w;
111 int pipeA_h;
112 int pipeB_x;
113 int pipeB_y;
114 int pipeB_w;
115 int pipeB_h;
116} drm_i915_sarea_t;
117
118/* Flags for perf_boxes
119 */
120#define I915_BOX_RING_EMPTY 0x1
121#define I915_BOX_FLIP 0x2
122#define I915_BOX_WAIT 0x4
123#define I915_BOX_TEXTURE_LOAD 0x8
124#define I915_BOX_LOST_CONTEXT 0x10
125
126/* I915 specific ioctls
127 * The device specific ioctl range is 0x40 to 0x79.
128 */
129#define DRM_I915_INIT 0x00
130#define DRM_I915_FLUSH 0x01
131#define DRM_I915_FLIP 0x02
132#define DRM_I915_BATCHBUFFER 0x03
133#define DRM_I915_IRQ_EMIT 0x04
134#define DRM_I915_IRQ_WAIT 0x05
135#define DRM_I915_GETPARAM 0x06
136#define DRM_I915_SETPARAM 0x07
137#define DRM_I915_ALLOC 0x08
138#define DRM_I915_FREE 0x09
139#define DRM_I915_INIT_HEAP 0x0a
140#define DRM_I915_CMDBUFFER 0x0b
141#define DRM_I915_DESTROY_HEAP 0x0c
142#define DRM_I915_SET_VBLANK_PIPE 0x0d
143#define DRM_I915_GET_VBLANK_PIPE 0x0e
144#define DRM_I915_VBLANK_SWAP 0x0f
145#define DRM_I915_HWS_ADDR 0x11
146
147#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
148#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
149#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
150#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
151#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
152#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
153#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
154#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
155#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
156#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
157#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
158#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
159#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
160#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
161#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
162#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
163
164/* Allow drivers to submit batchbuffers directly to hardware, relying
165 * on the security mechanisms provided by hardware.
166 */
167typedef struct _drm_i915_batchbuffer {
168 int start; /* agp offset */
169 int used; /* nr bytes in use */
170 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
171 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
172 int num_cliprects; /* multipass with multiple cliprects? */
173 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
174} drm_i915_batchbuffer_t;
175
176/* As above, but pass a pointer to userspace buffer which can be
177 * validated by the kernel prior to sending to hardware.
178 */
179typedef struct _drm_i915_cmdbuffer {
180 char __user *buf; /* pointer to userspace command buffer */
181 int sz; /* nr bytes in buf */
182 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
183 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
184 int num_cliprects; /* multipass with multiple cliprects? */
185 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
186} drm_i915_cmdbuffer_t;
187
188/* Userspace can request & wait on irq's:
189 */
190typedef struct drm_i915_irq_emit {
191 int __user *irq_seq;
192} drm_i915_irq_emit_t;
193
194typedef struct drm_i915_irq_wait {
195 int irq_seq;
196} drm_i915_irq_wait_t;
197
198/* Ioctl to query kernel params:
199 */
200#define I915_PARAM_IRQ_ACTIVE 1
201#define I915_PARAM_ALLOW_BATCHBUFFER 2
202#define I915_PARAM_LAST_DISPATCH 3
203
204typedef struct drm_i915_getparam {
205 int param;
206 int __user *value;
207} drm_i915_getparam_t;
208
209/* Ioctl to set kernel params:
210 */
211#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
212#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
213#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
214
215typedef struct drm_i915_setparam {
216 int param;
217 int value;
218} drm_i915_setparam_t;
219
220/* A memory manager for regions of shared memory:
221 */
222#define I915_MEM_REGION_AGP 1
223
224typedef struct drm_i915_mem_alloc {
225 int region;
226 int alignment;
227 int size;
228 int __user *region_offset; /* offset from start of fb or agp */
229} drm_i915_mem_alloc_t;
230
231typedef struct drm_i915_mem_free {
232 int region;
233 int region_offset;
234} drm_i915_mem_free_t;
235
236typedef struct drm_i915_mem_init_heap {
237 int region;
238 int size;
239 int start;
240} drm_i915_mem_init_heap_t;
241
242/* Allow memory manager to be torn down and re-initialized (eg on
243 * rotate):
244 */
245typedef struct drm_i915_mem_destroy_heap {
246 int region;
247} drm_i915_mem_destroy_heap_t;
248
249/* Allow X server to configure which pipes to monitor for vblank signals
250 */
251#define DRM_I915_VBLANK_PIPE_A 1
252#define DRM_I915_VBLANK_PIPE_B 2
253
254typedef struct drm_i915_vblank_pipe {
255 int pipe;
256} drm_i915_vblank_pipe_t;
257
258/* Schedule buffer swap at given vertical blank:
259 */
260typedef struct drm_i915_vblank_swap {
261 drm_drawable_t drawable;
262 enum drm_vblank_seq_type seqtype;
263 unsigned int sequence;
264} drm_i915_vblank_swap_t;
265
266typedef struct drm_i915_hws_addr {
267 uint64_t addr;
268} drm_i915_hws_addr_t;
269
270#endif /* _I915_DRM_H_ */
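
A hedged userspace sketch of the GETPARAM interface above, asking whether batchbuffer submission is allowed. The __user annotation compiles away outside the kernel, and the include paths are illustrative.

#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static int i915_allows_batchbuffers(int fd)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_ALLOW_BATCHBUFFER,
		.value = &value,	/* kernel writes the answer here */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return -1;
	return value;		/* non-zero: batchbuffers allowed */
}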
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h
deleted file mode 100644
index 944b50a5ff24..000000000000
--- a/drivers/char/drm/mga_drm.h
+++ /dev/null
@@ -1,417 +0,0 @@
1/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
2 * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 *
31 * Rewritten by:
32 * Gareth Hughes <gareth@valinux.com>
33 */
34
35#ifndef __MGA_DRM_H__
36#define __MGA_DRM_H__
37
38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the Xserver file (mga_sarea.h)
40 */
41
42#ifndef __MGA_SAREA_DEFINES__
43#define __MGA_SAREA_DEFINES__
44
45/* WARP pipe flags
46 */
47#define MGA_F 0x1 /* fog */
48#define MGA_A 0x2 /* alpha */
49#define MGA_S 0x4 /* specular */
50#define MGA_T2 0x8 /* multitexture */
51
52#define MGA_WARP_TGZ 0
53#define MGA_WARP_TGZF (MGA_F)
54#define MGA_WARP_TGZA (MGA_A)
55#define MGA_WARP_TGZAF (MGA_F|MGA_A)
56#define MGA_WARP_TGZS (MGA_S)
57#define MGA_WARP_TGZSF (MGA_S|MGA_F)
58#define MGA_WARP_TGZSA (MGA_S|MGA_A)
59#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
60#define MGA_WARP_T2GZ (MGA_T2)
61#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
62#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
63#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
64#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
65#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
66#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
67#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
68
69#define MGA_MAX_G200_PIPES 8 /* no multitex */
70#define MGA_MAX_G400_PIPES 16
71#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
72#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
73
74#define MGA_CARD_TYPE_G200 1
75#define MGA_CARD_TYPE_G400 2
76#define MGA_CARD_TYPE_G450 3 /* not currently used */
77#define MGA_CARD_TYPE_G550 4
78
79#define MGA_FRONT 0x1
80#define MGA_BACK 0x2
81#define MGA_DEPTH 0x4
82
83/* What needs to be changed for the current vertex dma buffer?
84 */
85#define MGA_UPLOAD_CONTEXT 0x1
86#define MGA_UPLOAD_TEX0 0x2
87#define MGA_UPLOAD_TEX1 0x4
88#define MGA_UPLOAD_PIPE 0x8
89#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
90#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
91#define MGA_UPLOAD_2D 0x40
92#define MGA_WAIT_AGE 0x80 /* handled client-side */
93#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
94#if 0
95#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
96 quiescent */
97#endif
98
99/* 128 buffers of 64k each, total 8 meg.
100 */
101#define MGA_BUFFER_SIZE (1 << 16)
102#define MGA_NUM_BUFFERS 128
103
104/* Keep these small for testing.
105 */
106#define MGA_NR_SAREA_CLIPRECTS 8
107
108/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
109 * regions, subject to a minimum region size of (1<<16) == 64k.
110 *
111 * Clients may subdivide regions internally, but when sharing between
112 * clients, the region size is the minimum granularity.
113 */
114
115#define MGA_CARD_HEAP 0
116#define MGA_AGP_HEAP 1
117#define MGA_NR_TEX_HEAPS 2
118#define MGA_NR_TEX_REGIONS 16
119#define MGA_LOG_MIN_TEX_REGION_SIZE 16
120
121#define DRM_MGA_IDLE_RETRY 2048
122
123#endif /* __MGA_SAREA_DEFINES__ */
124
125/* Setup registers for 3D context
126 */
127typedef struct {
128 unsigned int dstorg;
129 unsigned int maccess;
130 unsigned int plnwt;
131 unsigned int dwgctl;
132 unsigned int alphactrl;
133 unsigned int fogcolor;
134 unsigned int wflag;
135 unsigned int tdualstage0;
136 unsigned int tdualstage1;
137 unsigned int fcol;
138 unsigned int stencil;
139 unsigned int stencilctl;
140} drm_mga_context_regs_t;
141
142/* Setup registers for 2D, X server
143 */
144typedef struct {
145 unsigned int pitch;
146} drm_mga_server_regs_t;
147
148/* Setup registers for each texture unit
149 */
150typedef struct {
151 unsigned int texctl;
152 unsigned int texctl2;
153 unsigned int texfilter;
154 unsigned int texbordercol;
155 unsigned int texorg;
156 unsigned int texwidth;
157 unsigned int texheight;
158 unsigned int texorg1;
159 unsigned int texorg2;
160 unsigned int texorg3;
161 unsigned int texorg4;
162} drm_mga_texture_regs_t;
163
164/* General aging mechanism
165 */
166typedef struct {
167 unsigned int head; /* Position of head pointer */
168 unsigned int wrap; /* Primary DMA wrap count */
169} drm_mga_age_t;
170
171typedef struct _drm_mga_sarea {
172 /* The channel for communication of state information to the kernel
173 * on firing a vertex dma buffer.
174 */
175 drm_mga_context_regs_t context_state;
176 drm_mga_server_regs_t server_state;
177 drm_mga_texture_regs_t tex_state[2];
178 unsigned int warp_pipe;
179 unsigned int dirty;
180 unsigned int vertsize;
181
182 /* The current cliprects, or a subset thereof.
183 */
184 struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
185 unsigned int nbox;
186
187 /* Information about the most recently used 3d drawable. The
188 * client fills in the req_* fields, the server fills in the
189 * exported_ fields and puts the cliprects into boxes, above.
190 *
191 * The client clears the exported_drawable field before
192 * clobbering the boxes data.
193 */
194 unsigned int req_drawable; /* the X drawable id */
195 unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
196
197 unsigned int exported_drawable;
198 unsigned int exported_index;
199 unsigned int exported_stamp;
200 unsigned int exported_buffers;
201 unsigned int exported_nfront;
202 unsigned int exported_nback;
203 int exported_back_x, exported_front_x, exported_w;
204 int exported_back_y, exported_front_y, exported_h;
205 struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
206
207 /* Counters for aging textures and for client-side throttling.
208 */
209 unsigned int status[4];
210 unsigned int last_wrap;
211
212 drm_mga_age_t last_frame;
213 unsigned int last_enqueue; /* last time a buffer was enqueued */
214 unsigned int last_dispatch; /* age of the most recently dispatched buffer */
215 unsigned int last_quiescent; /* */
216
217 /* LRU lists for texture memory in agp space and on the card.
218 */
219 struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
220 unsigned int texAge[MGA_NR_TEX_HEAPS];
221
222 /* Mechanism to validate card state.
223 */
224 int ctxOwner;
225} drm_mga_sarea_t;
226
227/* MGA specific ioctls
228 * The device specific ioctl range is 0x40 to 0x79.
229 */
230#define DRM_MGA_INIT 0x00
231#define DRM_MGA_FLUSH 0x01
232#define DRM_MGA_RESET 0x02
233#define DRM_MGA_SWAP 0x03
234#define DRM_MGA_CLEAR 0x04
235#define DRM_MGA_VERTEX 0x05
236#define DRM_MGA_INDICES 0x06
237#define DRM_MGA_ILOAD 0x07
238#define DRM_MGA_BLIT 0x08
239#define DRM_MGA_GETPARAM 0x09
240
241/* 3.2:
242 * ioctls for operating on fences.
243 */
244#define DRM_MGA_SET_FENCE 0x0a
245#define DRM_MGA_WAIT_FENCE 0x0b
246#define DRM_MGA_DMA_BOOTSTRAP 0x0c
247
248#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
249#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
250#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
251#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
252#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
253#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
254#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
255#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
256#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
257#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
258#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
259#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
260#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
261
262typedef struct _drm_mga_warp_index {
263 int installed;
264 unsigned long phys_addr;
265 int size;
266} drm_mga_warp_index_t;
267
268typedef struct drm_mga_init {
269 enum {
270 MGA_INIT_DMA = 0x01,
271 MGA_CLEANUP_DMA = 0x02
272 } func;
273
274 unsigned long sarea_priv_offset;
275
276 int chipset;
277 int sgram;
278
279 unsigned int maccess;
280
281 unsigned int fb_cpp;
282 unsigned int front_offset, front_pitch;
283 unsigned int back_offset, back_pitch;
284
285 unsigned int depth_cpp;
286 unsigned int depth_offset, depth_pitch;
287
288 unsigned int texture_offset[MGA_NR_TEX_HEAPS];
289 unsigned int texture_size[MGA_NR_TEX_HEAPS];
290
291 unsigned long fb_offset;
292 unsigned long mmio_offset;
293 unsigned long status_offset;
294 unsigned long warp_offset;
295 unsigned long primary_offset;
296 unsigned long buffers_offset;
297} drm_mga_init_t;
298
299typedef struct drm_mga_dma_bootstrap {
300 /**
301 * \name AGP texture region
302 *
303 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
304 * be filled in with the actual AGP texture settings.
305 *
306 * \warning
307 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
308 * is zero, it means that PCI memory (most likely through the use of
309 * an IOMMU) is being used for "AGP" textures.
310 */
311 /*@{ */
312 unsigned long texture_handle; /**< Handle used to map AGP textures. */
313 uint32_t texture_size; /**< Size of the AGP texture region. */
314 /*@} */
315
316 /**
317 * Requested size of the primary DMA region.
318 *
319 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
320 * filled in with the actual size of the primary DMA region allocated.
321 */
322 uint32_t primary_size;
323
324 /**
325 * Requested number of secondary DMA buffers.
326 *
327 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
328 * filled in with the actual number of secondary DMA buffers
329 * allocated. Particularly when PCI DMA is used, this may be
330 * (substantially) less than the number requested.
331 */
332 uint32_t secondary_bin_count;
333
334 /**
335 * Requested size of each secondary DMA buffer.
336 *
337 * While the kernel \b is free to reduce
338 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
339 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
340 */
341 uint32_t secondary_bin_size;
342
343 /**
344 * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
345 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
346 * zero, it means that PCI DMA should be used, even if AGP is
347 * possible.
348 *
349 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
350 * filled in with the actual AGP mode. If AGP was not available
351 * (i.e., PCI DMA was used), this value will be zero.
352 */
353 uint32_t agp_mode;
354
355 /**
356 * Desired AGP GART size, measured in megabytes.
357 */
358 uint8_t agp_size;
359} drm_mga_dma_bootstrap_t;
360
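For reference, the request/return contract documented above might be exercised from user space roughly as follows. This is a minimal sketch, not driver code: it assumes an already-open MGA DRM file descriptor (fd), calls the plain ioctl(2) interface instead of a libdrm wrapper, takes AGPSTAT2_4X from the kernel's AGP definitions, and uses illustrative request sizes.

#include <string.h>
#include <sys/ioctl.h>

/* Sketch: request DMA resources and detect the PCI fallback. */
static int mga_bootstrap_dma(int fd)
{
	drm_mga_dma_bootstrap_t bs;

	memset(&bs, 0, sizeof(bs));
	bs.primary_size = 1 * 1024 * 1024;   /* requested; kernel may adjust it */
	bs.secondary_bin_count = 16;         /* kernel may return fewer bins    */
	bs.secondary_bin_size = 64 * 1024;   /* kernel must not shrink this     */
	bs.agp_mode = AGPSTAT2_4X;           /* zero here would force PCI DMA   */
	bs.agp_size = 64;                    /* desired GART size in megabytes  */

	if (ioctl(fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, &bs) != 0)
		return -1;

	if (bs.agp_mode == 0) {
		/* AGP was unavailable: texture_handle/texture_size now
		 * describe PCI ("fake AGP") memory instead. */
	}
	return 0;
}
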
361typedef struct drm_mga_clear {
362 unsigned int flags;
363 unsigned int clear_color;
364 unsigned int clear_depth;
365 unsigned int color_mask;
366 unsigned int depth_mask;
367} drm_mga_clear_t;
368
369typedef struct drm_mga_vertex {
370 int idx; /* buffer to queue */
371 int used; /* bytes in use */
372 int discard; /* client finished with buffer? */
373} drm_mga_vertex_t;
374
375typedef struct drm_mga_indices {
376 int idx; /* buffer to queue */
377 unsigned int start;
378 unsigned int end;
379 int discard; /* client finished with buffer? */
380} drm_mga_indices_t;
381
382typedef struct drm_mga_iload {
383 int idx;
384 unsigned int dstorg;
385 unsigned int length;
386} drm_mga_iload_t;
387
388typedef struct _drm_mga_blit {
389 unsigned int planemask;
390 unsigned int srcorg;
391 unsigned int dstorg;
392 int src_pitch, dst_pitch;
393 int delta_sx, delta_sy;
394 int delta_dx, delta_dy;
395 int height, ydir; /* flip image vertically */
396 int source_pitch, dest_pitch;
397} drm_mga_blit_t;
398
399/* 3.1: An ioctl to get parameters that aren't available to the 3d
400 * client any other way.
401 */
402#define MGA_PARAM_IRQ_NR 1
403
404/* 3.2: Query the actual card type. The DDX only distinguishes between
405 * G200 chips and non-G200 chips, which it calls G400. It turns out that
406 * there are some very subtle differences between the G4x0 chips and the G550
407 * chips. Using this parameter query, a client-side driver can detect the
408 * difference between a G4x0 and a G550.
409 */
410#define MGA_PARAM_CARD_TYPE 2
411
412typedef struct drm_mga_getparam {
413 int param;
414 void __user *value;
415} drm_mga_getparam_t;
416
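A hedged sketch of the query described above, as a client-side driver might issue it to tell a G4x0 from a G550; the helper name is invented for illustration, and an open DRM fd plus the plain ioctl(2) interface are assumed.

#include <errno.h>
#include <sys/ioctl.h>

/* Illustrative helper: fetch MGA_PARAM_CARD_TYPE (needs DRM >= 3.2). */
static int mga_query_card_type(int fd, int *card_type)
{
	drm_mga_getparam_t gp;

	gp.param = MGA_PARAM_CARD_TYPE;
	gp.value = card_type;	/* kernel writes the result through this pointer */

	if (ioctl(fd, DRM_IOCTL_MGA_GETPARAM, &gp) != 0)
		return -errno;	/* e.g. EINVAL on drivers older than 3.2 */
	return 0;
}
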
417#endif
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
deleted file mode 100644
index 8d8878b55f55..000000000000
--- a/drivers/char/drm/r128_drm.h
+++ /dev/null
@@ -1,326 +0,0 @@
1/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
3 */
4/*
5 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
6 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
7 * All rights reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Gareth Hughes <gareth@valinux.com>
30 * Kevin E. Martin <martin@valinux.com>
31 */
32
33#ifndef __R128_DRM_H__
34#define __R128_DRM_H__
35
36/* WARNING: If you change any of these defines, make sure to change the
37 * defines in the X server file (r128_sarea.h)
38 */
39#ifndef __R128_SAREA_DEFINES__
40#define __R128_SAREA_DEFINES__
41
42/* What needs to be changed for the current vertex buffer?
43 */
44#define R128_UPLOAD_CONTEXT 0x001
45#define R128_UPLOAD_SETUP 0x002
46#define R128_UPLOAD_TEX0 0x004
47#define R128_UPLOAD_TEX1 0x008
48#define R128_UPLOAD_TEX0IMAGES 0x010
49#define R128_UPLOAD_TEX1IMAGES 0x020
50#define R128_UPLOAD_CORE 0x040
51#define R128_UPLOAD_MASKS 0x080
52#define R128_UPLOAD_WINDOW 0x100
53#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
54#define R128_REQUIRE_QUIESCENCE 0x400
55#define R128_UPLOAD_ALL 0x7ff
56
57#define R128_FRONT 0x1
58#define R128_BACK 0x2
59#define R128_DEPTH 0x4
60
61/* Primitive types
62 */
63#define R128_POINTS 0x1
64#define R128_LINES 0x2
65#define R128_LINE_STRIP 0x3
66#define R128_TRIANGLES 0x4
67#define R128_TRIANGLE_FAN 0x5
68#define R128_TRIANGLE_STRIP 0x6
69
70/* Vertex/indirect buffer size
71 */
72#define R128_BUFFER_SIZE 16384
73
74/* Byte offsets for indirect buffer data
75 */
76#define R128_INDEX_PRIM_OFFSET 20
77#define R128_HOSTDATA_BLIT_OFFSET 32
78
79/* Keep these small for testing.
80 */
81#define R128_NR_SAREA_CLIPRECTS 12
82
83/* There are 2 heaps (local/AGP). Each region within a heap is a
84 * minimum of 64k, and there are at most 64 of them per heap.
85 */
86#define R128_LOCAL_TEX_HEAP 0
87#define R128_AGP_TEX_HEAP 1
88#define R128_NR_TEX_HEAPS 2
89#define R128_NR_TEX_REGIONS 64
90#define R128_LOG_TEX_GRANULARITY 16
91
92#define R128_NR_CONTEXT_REGS 12
93
94#define R128_MAX_TEXTURE_LEVELS 11
95#define R128_MAX_TEXTURE_UNITS 2
96
97#endif /* __R128_SAREA_DEFINES__ */
98
99typedef struct {
100 /* Context state - can be written in one large chunk */
101 unsigned int dst_pitch_offset_c;
102 unsigned int dp_gui_master_cntl_c;
103 unsigned int sc_top_left_c;
104 unsigned int sc_bottom_right_c;
105 unsigned int z_offset_c;
106 unsigned int z_pitch_c;
107 unsigned int z_sten_cntl_c;
108 unsigned int tex_cntl_c;
109 unsigned int misc_3d_state_cntl_reg;
110 unsigned int texture_clr_cmp_clr_c;
111 unsigned int texture_clr_cmp_msk_c;
112 unsigned int fog_color_c;
113
114 /* Texture state */
115 unsigned int tex_size_pitch_c;
116 unsigned int constant_color_c;
117
118 /* Setup state */
119 unsigned int pm4_vc_fpu_setup;
120 unsigned int setup_cntl;
121
122 /* Mask state */
123 unsigned int dp_write_mask;
124 unsigned int sten_ref_mask_c;
125 unsigned int plane_3d_mask_c;
126
127 /* Window state */
128 unsigned int window_xy_offset;
129
130 /* Core state */
131 unsigned int scale_3d_cntl;
132} drm_r128_context_regs_t;
133
134/* Setup registers for each texture unit
135 */
136typedef struct {
137 unsigned int tex_cntl;
138 unsigned int tex_combine_cntl;
139 unsigned int tex_size_pitch;
140 unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
141 unsigned int tex_border_color;
142} drm_r128_texture_regs_t;
143
144typedef struct drm_r128_sarea {
145 /* The channel for communication of state information to the kernel
146 * on firing a vertex buffer.
147 */
148 drm_r128_context_regs_t context_state;
149 drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
150 unsigned int dirty;
151 unsigned int vertsize;
152 unsigned int vc_format;
153
154 /* The current cliprects, or a subset thereof.
155 */
156 struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
157 unsigned int nbox;
158
159 /* Counters for client-side throttling of rendering clients.
160 */
161 unsigned int last_frame;
162 unsigned int last_dispatch;
163
164 struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
165 unsigned int tex_age[R128_NR_TEX_HEAPS];
166 int ctx_owner;
167 int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
168 int pfCurrentPage; /* which buffer is being displayed? */
169} drm_r128_sarea_t;
170
171/* WARNING: If you change any of these defines, make sure to change the
172 * defines in the Xserver file (xf86drmR128.h)
173 */
174
175/* Rage 128 specific ioctls
176 * The device specific ioctl range is 0x40 to 0x79.
177 */
178#define DRM_R128_INIT 0x00
179#define DRM_R128_CCE_START 0x01
180#define DRM_R128_CCE_STOP 0x02
181#define DRM_R128_CCE_RESET 0x03
182#define DRM_R128_CCE_IDLE 0x04
183/* 0x05 not used */
184#define DRM_R128_RESET 0x06
185#define DRM_R128_SWAP 0x07
186#define DRM_R128_CLEAR 0x08
187#define DRM_R128_VERTEX 0x09
188#define DRM_R128_INDICES 0x0a
189#define DRM_R128_BLIT 0x0b
190#define DRM_R128_DEPTH 0x0c
191#define DRM_R128_STIPPLE 0x0d
192/* 0x0e not used */
193#define DRM_R128_INDIRECT 0x0f
194#define DRM_R128_FULLSCREEN 0x10
195#define DRM_R128_CLEAR2 0x11
196#define DRM_R128_GETPARAM 0x12
197#define DRM_R128_FLIP 0x13
198
199#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
200#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
201#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
202#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
203#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
204/* 0x05 not used */
205#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
206#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
207#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
208#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
209#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
210#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
211#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
212#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
213/* 0x0e not used */
214#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
215#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
216#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
217#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
218#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
219
220typedef struct drm_r128_init {
221 enum {
222 R128_INIT_CCE = 0x01,
223 R128_CLEANUP_CCE = 0x02
224 } func;
225 unsigned long sarea_priv_offset;
226 int is_pci;
227 int cce_mode;
228 int cce_secure;
229 int ring_size;
230 int usec_timeout;
231
232 unsigned int fb_bpp;
233 unsigned int front_offset, front_pitch;
234 unsigned int back_offset, back_pitch;
235 unsigned int depth_bpp;
236 unsigned int depth_offset, depth_pitch;
237 unsigned int span_offset;
238
239 unsigned long fb_offset;
240 unsigned long mmio_offset;
241 unsigned long ring_offset;
242 unsigned long ring_rptr_offset;
243 unsigned long buffers_offset;
244 unsigned long agp_textures_offset;
245} drm_r128_init_t;
246
247typedef struct drm_r128_cce_stop {
248 int flush;
249 int idle;
250} drm_r128_cce_stop_t;
251
252typedef struct drm_r128_clear {
253 unsigned int flags;
254 unsigned int clear_color;
255 unsigned int clear_depth;
256 unsigned int color_mask;
257 unsigned int depth_mask;
258} drm_r128_clear_t;
259
260typedef struct drm_r128_vertex {
261 int prim;
262 int idx; /* Index of vertex buffer */
263 int count; /* Number of vertices in buffer */
264 int discard; /* Client finished with buffer? */
265} drm_r128_vertex_t;
266
267typedef struct drm_r128_indices {
268 int prim;
269 int idx;
270 int start;
271 int end;
272 int discard; /* Client finished with buffer? */
273} drm_r128_indices_t;
274
275typedef struct drm_r128_blit {
276 int idx;
277 int pitch;
278 int offset;
279 int format;
280 unsigned short x, y;
281 unsigned short width, height;
282} drm_r128_blit_t;
283
284typedef struct drm_r128_depth {
285 enum {
286 R128_WRITE_SPAN = 0x01,
287 R128_WRITE_PIXELS = 0x02,
288 R128_READ_SPAN = 0x03,
289 R128_READ_PIXELS = 0x04
290 } func;
291 int n;
292 int __user *x;
293 int __user *y;
294 unsigned int __user *buffer;
295 unsigned char __user *mask;
296} drm_r128_depth_t;
297
298typedef struct drm_r128_stipple {
299 unsigned int __user *mask;
300} drm_r128_stipple_t;
301
302typedef struct drm_r128_indirect {
303 int idx;
304 int start;
305 int end;
306 int discard;
307} drm_r128_indirect_t;
308
309typedef struct drm_r128_fullscreen {
310 enum {
311 R128_INIT_FULLSCREEN = 0x01,
312 R128_CLEANUP_FULLSCREEN = 0x02
313 } func;
314} drm_r128_fullscreen_t;
315
316/* 2.3: An ioctl to get parameters that aren't available to the 3d
317 * client any other way.
318 */
319#define R128_PARAM_IRQ_NR 1
320
321typedef struct drm_r128_getparam {
322 int param;
323 void __user *value;
324} drm_r128_getparam_t;
325
326#endif
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
deleted file mode 100644
index 73ff51f12311..000000000000
--- a/drivers/char/drm/radeon_drm.h
+++ /dev/null
@@ -1,749 +0,0 @@
1/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
2 *
3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Kevin E. Martin <martin@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 * Keith Whitwell <keith@tungstengraphics.com>
31 */
32
33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__
35
36/* WARNING: If you change any of these defines, make sure to change the
37 * defines in the X server file (radeon_sarea.h)
38 */
39#ifndef __RADEON_SAREA_DEFINES__
40#define __RADEON_SAREA_DEFINES__
41
42/* Old style state flags, required for sarea interface (1.1 and 1.2
43 * clears) and 1.2 drm_vertex2 ioctl.
44 */
45#define RADEON_UPLOAD_CONTEXT 0x00000001
46#define RADEON_UPLOAD_VERTFMT 0x00000002
47#define RADEON_UPLOAD_LINE 0x00000004
48#define RADEON_UPLOAD_BUMPMAP 0x00000008
49#define RADEON_UPLOAD_MASKS 0x00000010
50#define RADEON_UPLOAD_VIEWPORT 0x00000020
51#define RADEON_UPLOAD_SETUP 0x00000040
52#define RADEON_UPLOAD_TCL 0x00000080
53#define RADEON_UPLOAD_MISC 0x00000100
54#define RADEON_UPLOAD_TEX0 0x00000200
55#define RADEON_UPLOAD_TEX1 0x00000400
56#define RADEON_UPLOAD_TEX2 0x00000800
57#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
58#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
59#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
60#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
61#define RADEON_REQUIRE_QUIESCENCE 0x00010000
62#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
63#define RADEON_UPLOAD_ALL 0x003effff
64#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
65
66/* New style per-packet identifiers for use in cmd_buffer ioctl with
67 * the RADEON_EMIT_PACKET command. Comments relate new packets to old
68 * state bits and the packet size:
69 */
70#define RADEON_EMIT_PP_MISC 0 /* context/7 */
71#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
72#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
73#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
74#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
75#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
76#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
77#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
78#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
79#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
80#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
81#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
82#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
83#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
84#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
85#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
86#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
87#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
88#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
89#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
90#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
91#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
92#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
93#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
94#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
95#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
96#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
97#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
98#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
99#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
100#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
101#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
102#define R200_EMIT_VAP_CTL 32 /* vap/1 */
103#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
104#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
105#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
106#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
107#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
108#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
109#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
110#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
111#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
112#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
113#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
114#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
115#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
116#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
117#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
118#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
119#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
120#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
121#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
122#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
123#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
124#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
125#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
126#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
127#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
128#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
129#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
130#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
131#define R200_EMIT_PP_CUBIC_FACES_0 61
132#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
133#define R200_EMIT_PP_CUBIC_FACES_1 63
134#define R200_EMIT_PP_CUBIC_OFFSETS_1 64
135#define R200_EMIT_PP_CUBIC_FACES_2 65
136#define R200_EMIT_PP_CUBIC_OFFSETS_2 66
137#define R200_EMIT_PP_CUBIC_FACES_3 67
138#define R200_EMIT_PP_CUBIC_OFFSETS_3 68
139#define R200_EMIT_PP_CUBIC_FACES_4 69
140#define R200_EMIT_PP_CUBIC_OFFSETS_4 70
141#define R200_EMIT_PP_CUBIC_FACES_5 71
142#define R200_EMIT_PP_CUBIC_OFFSETS_5 72
143#define RADEON_EMIT_PP_TEX_SIZE_0 73
144#define RADEON_EMIT_PP_TEX_SIZE_1 74
145#define RADEON_EMIT_PP_TEX_SIZE_2 75
146#define R200_EMIT_RB3D_BLENDCOLOR 76
147#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77
148#define RADEON_EMIT_PP_CUBIC_FACES_0 78
149#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79
150#define RADEON_EMIT_PP_CUBIC_FACES_1 80
151#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81
152#define RADEON_EMIT_PP_CUBIC_FACES_2 82
153#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83
154#define R200_EMIT_PP_TRI_PERF_CNTL 84
155#define R200_EMIT_PP_AFS_0 85
156#define R200_EMIT_PP_AFS_1 86
157#define R200_EMIT_ATF_TFACTOR 87
158#define R200_EMIT_PP_TXCTLALL_0 88
159#define R200_EMIT_PP_TXCTLALL_1 89
160#define R200_EMIT_PP_TXCTLALL_2 90
161#define R200_EMIT_PP_TXCTLALL_3 91
162#define R200_EMIT_PP_TXCTLALL_4 92
163#define R200_EMIT_PP_TXCTLALL_5 93
164#define R200_EMIT_VAP_PVS_CNTL 94
165#define RADEON_MAX_STATE_PACKETS 95
166
167/* Commands understood by cmd_buffer ioctl. More can be added but
168 * obviously these can't be removed or changed:
169 */
170#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
171#define RADEON_CMD_SCALARS 2 /* emit scalar data */
172#define RADEON_CMD_VECTORS 3 /* emit vector data */
173#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
174#define RADEON_CMD_PACKET3 5 /* emit hw packet */
175#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
176#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
177#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
178 * doesn't make the cpu wait, just
179 * the graphics hardware */
180#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */
181
182typedef union {
183 int i;
184 struct {
185 unsigned char cmd_type, pad0, pad1, pad2;
186 } header;
187 struct {
188 unsigned char cmd_type, packet_id, pad0, pad1;
189 } packet;
190 struct {
191 unsigned char cmd_type, offset, stride, count;
192 } scalars;
193 struct {
194 unsigned char cmd_type, offset, stride, count;
195 } vectors;
196 struct {
197 unsigned char cmd_type, addr_lo, addr_hi, count;
198 } veclinear;
199 struct {
200 unsigned char cmd_type, buf_idx, pad0, pad1;
201 } dma;
202 struct {
203 unsigned char cmd_type, flags, pad0, pad1;
204 } wait;
205} drm_radeon_cmd_header_t;
206
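To make the encoding above concrete, here is a hedged sketch of a client emitting a single RADEON_CMD_PACKET entry (RADEON_EMIT_PP_MISC, which per the table above carries 7 register dwords) through the CMDBUF ioctl. The register payload and the absence of cliprects are placeholders; a real driver batches many packets per buffer and normally goes through libdrm.

#include <string.h>
#include <sys/ioctl.h>

/* Sketch only: one state packet, header followed by its 7 payload dwords. */
static int radeon_emit_pp_misc(int fd, const unsigned int regs[7])
{
	char buf[sizeof(drm_radeon_cmd_header_t) + 7 * sizeof(unsigned int)];
	drm_radeon_cmd_header_t header;
	drm_radeon_cmd_buffer_t cmdbuf;

	memset(&header, 0, sizeof(header));
	header.packet.cmd_type = RADEON_CMD_PACKET;
	header.packet.packet_id = RADEON_EMIT_PP_MISC;	/* context/7 */

	memcpy(buf, &header, sizeof(header));
	memcpy(buf + sizeof(header), regs, 7 * sizeof(unsigned int));

	memset(&cmdbuf, 0, sizeof(cmdbuf));
	cmdbuf.bufsz = sizeof(buf);
	cmdbuf.buf = buf;
	cmdbuf.nbox = 0;	/* no cliprects in this sketch */
	cmdbuf.boxes = NULL;

	return ioctl(fd, DRM_IOCTL_RADEON_CMDBUF, &cmdbuf);
}
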
207#define RADEON_WAIT_2D 0x1
208#define RADEON_WAIT_3D 0x2
209
210/* Allowed parameters for R300_CMD_PACKET3
211 */
212#define R300_CMD_PACKET3_CLEAR 0
213#define R300_CMD_PACKET3_RAW 1
214
215/* Commands understood by cmd_buffer ioctl for R300.
216 * The interface has not been stabilized, so some of these may be removed
217 * and eventually reordered before stabilization.
218 */
219#define R300_CMD_PACKET0 1
220#define R300_CMD_VPU 2 /* emit vertex program upload */
221#define R300_CMD_PACKET3 3 /* emit a packet3 */
222#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
223#define R300_CMD_CP_DELAY 5
224#define R300_CMD_DMA_DISCARD 6
225#define R300_CMD_WAIT 7
226# define R300_WAIT_2D 0x1
227# define R300_WAIT_3D 0x2
228/* these two defines are DOING IT WRONG - however
229 * we have userspace which relies on using these.
230 * The wait interface is kept for backwards compatibility; new
231 * code should use the NEW_WAIT defines below.
232 * THESE ARE NOT BIT FIELDS
233 */
234# define R300_WAIT_2D_CLEAN 0x3
235# define R300_WAIT_3D_CLEAN 0x4
236
237# define R300_NEW_WAIT_2D_3D 0x3
238# define R300_NEW_WAIT_2D_2D_CLEAN 0x4
239# define R300_NEW_WAIT_3D_3D_CLEAN 0x6
240# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
241
242#define R300_CMD_SCRATCH 8
243#define R300_CMD_R500FP 9
244
245typedef union {
246 unsigned int u;
247 struct {
248 unsigned char cmd_type, pad0, pad1, pad2;
249 } header;
250 struct {
251 unsigned char cmd_type, count, reglo, reghi;
252 } packet0;
253 struct {
254 unsigned char cmd_type, count, adrlo, adrhi;
255 } vpu;
256 struct {
257 unsigned char cmd_type, packet, pad0, pad1;
258 } packet3;
259 struct {
260 unsigned char cmd_type, packet;
261 unsigned short count; /* amount of packet2 to emit */
262 } delay;
263 struct {
264 unsigned char cmd_type, buf_idx, pad0, pad1;
265 } dma;
266 struct {
267 unsigned char cmd_type, flags, pad0, pad1;
268 } wait;
269 struct {
270 unsigned char cmd_type, reg, n_bufs, flags;
271 } scratch;
272 struct {
273 unsigned char cmd_type, count, adrlo, adrhi_flags;
274 } r500fp;
275} drm_r300_cmd_header_t;
276
277#define RADEON_FRONT 0x1
278#define RADEON_BACK 0x2
279#define RADEON_DEPTH 0x4
280#define RADEON_STENCIL 0x8
281#define RADEON_CLEAR_FASTZ 0x80000000
282#define RADEON_USE_HIERZ 0x40000000
283#define RADEON_USE_COMP_ZBUF 0x20000000
284
285#define R500FP_CONSTANT_TYPE (1 << 1)
286#define R500FP_CONSTANT_CLAMP (1 << 2)
287
288/* Primitive types
289 */
290#define RADEON_POINTS 0x1
291#define RADEON_LINES 0x2
292#define RADEON_LINE_STRIP 0x3
293#define RADEON_TRIANGLES 0x4
294#define RADEON_TRIANGLE_FAN 0x5
295#define RADEON_TRIANGLE_STRIP 0x6
296
297/* Vertex/indirect buffer size
298 */
299#define RADEON_BUFFER_SIZE 65536
300
301/* Byte offsets for indirect buffer data
302 */
303#define RADEON_INDEX_PRIM_OFFSET 20
304
305#define RADEON_SCRATCH_REG_OFFSET 32
306
307#define RADEON_NR_SAREA_CLIPRECTS 12
308
309/* There are 2 heaps (local/GART). Each region within a heap is a
310 * minimum of 64k, and there are at most 64 of them per heap.
311 */
312#define RADEON_LOCAL_TEX_HEAP 0
313#define RADEON_GART_TEX_HEAP 1
314#define RADEON_NR_TEX_HEAPS 2
315#define RADEON_NR_TEX_REGIONS 64
316#define RADEON_LOG_TEX_GRANULARITY 16
317
318#define RADEON_MAX_TEXTURE_LEVELS 12
319#define RADEON_MAX_TEXTURE_UNITS 3
320
321#define RADEON_MAX_SURFACES 8
322
323/* Blits have strict offset rules. All blit offsets must be aligned on
324 * a 1K-byte boundary.
325 */
326#define RADEON_OFFSET_SHIFT 10
327#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
328#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
329
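The two macros above encode the 1 KB rule; a small sketch of how a client might check or round an offset before using it in a blit, relying only on the definitions in this header:

/* Sketch: enforce the 1 KB blit-offset alignment described above. */
static inline int radeon_offset_is_aligned(unsigned int offset)
{
	return (offset & RADEON_OFFSET_MASK) == 0;
}

static inline unsigned int radeon_offset_align_up(unsigned int offset)
{
	return (offset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
}
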
330#endif /* __RADEON_SAREA_DEFINES__ */
331
332typedef struct {
333 unsigned int red;
334 unsigned int green;
335 unsigned int blue;
336 unsigned int alpha;
337} radeon_color_regs_t;
338
339typedef struct {
340 /* Context state */
341 unsigned int pp_misc; /* 0x1c14 */
342 unsigned int pp_fog_color;
343 unsigned int re_solid_color;
344 unsigned int rb3d_blendcntl;
345 unsigned int rb3d_depthoffset;
346 unsigned int rb3d_depthpitch;
347 unsigned int rb3d_zstencilcntl;
348
349 unsigned int pp_cntl; /* 0x1c38 */
350 unsigned int rb3d_cntl;
351 unsigned int rb3d_coloroffset;
352 unsigned int re_width_height;
353 unsigned int rb3d_colorpitch;
354 unsigned int se_cntl;
355
356 /* Vertex format state */
357 unsigned int se_coord_fmt; /* 0x1c50 */
358
359 /* Line state */
360 unsigned int re_line_pattern; /* 0x1cd0 */
361 unsigned int re_line_state;
362
363 unsigned int se_line_width; /* 0x1db8 */
364
365 /* Bumpmap state */
366 unsigned int pp_lum_matrix; /* 0x1d00 */
367
368 unsigned int pp_rot_matrix_0; /* 0x1d58 */
369 unsigned int pp_rot_matrix_1;
370
371 /* Mask state */
372 unsigned int rb3d_stencilrefmask; /* 0x1d7c */
373 unsigned int rb3d_ropcntl;
374 unsigned int rb3d_planemask;
375
376 /* Viewport state */
377 unsigned int se_vport_xscale; /* 0x1d98 */
378 unsigned int se_vport_xoffset;
379 unsigned int se_vport_yscale;
380 unsigned int se_vport_yoffset;
381 unsigned int se_vport_zscale;
382 unsigned int se_vport_zoffset;
383
384 /* Setup state */
385 unsigned int se_cntl_status; /* 0x2140 */
386
387 /* Misc state */
388 unsigned int re_top_left; /* 0x26c0 */
389 unsigned int re_misc;
390} drm_radeon_context_regs_t;
391
392typedef struct {
393 /* Zbias state */
394 unsigned int se_zbias_factor; /* 0x1dac */
395 unsigned int se_zbias_constant;
396} drm_radeon_context2_regs_t;
397
398/* Setup registers for each texture unit
399 */
400typedef struct {
401 unsigned int pp_txfilter;
402 unsigned int pp_txformat;
403 unsigned int pp_txoffset;
404 unsigned int pp_txcblend;
405 unsigned int pp_txablend;
406 unsigned int pp_tfactor;
407 unsigned int pp_border_color;
408} drm_radeon_texture_regs_t;
409
410typedef struct {
411 unsigned int start;
412 unsigned int finish;
413 unsigned int prim:8;
414 unsigned int stateidx:8;
415 unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
416 unsigned int vc_format; /* vertex format */
417} drm_radeon_prim_t;
418
419typedef struct {
420 drm_radeon_context_regs_t context;
421 drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
422 drm_radeon_context2_regs_t context2;
423 unsigned int dirty;
424} drm_radeon_state_t;
425
426typedef struct {
427 /* The channel for communication of state information to the
428 * kernel on firing a vertex buffer with either of the
429 * obsoleted vertex/index ioctls.
430 */
431 drm_radeon_context_regs_t context_state;
432 drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
433 unsigned int dirty;
434 unsigned int vertsize;
435 unsigned int vc_format;
436
437 /* The current cliprects, or a subset thereof.
438 */
439 struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
440 unsigned int nbox;
441
442 /* Counters for client-side throttling of rendering clients.
443 */
444 unsigned int last_frame;
445 unsigned int last_dispatch;
446 unsigned int last_clear;
447
448 struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
449 1];
450 unsigned int tex_age[RADEON_NR_TEX_HEAPS];
451 int ctx_owner;
452	int pfState; /* number of 3d windows (0, 1, 2 or more) */
453 int pfCurrentPage; /* which buffer is being displayed? */
454 int crtc2_base; /* CRTC2 frame offset */
455 int tiling_enabled; /* set by drm, read by 2d + 3d clients */
456} drm_radeon_sarea_t;
457
458/* WARNING: If you change any of these defines, make sure to change the
459 * defines in the Xserver file (xf86drmRadeon.h)
460 *
461 * KW: actually it's illegal to change any of this (backwards compatibility).
462 */
463
464/* Radeon specific ioctls
465 * The device specific ioctl range is 0x40 to 0x79.
466 */
467#define DRM_RADEON_CP_INIT 0x00
468#define DRM_RADEON_CP_START 0x01
469#define DRM_RADEON_CP_STOP 0x02
470#define DRM_RADEON_CP_RESET 0x03
471#define DRM_RADEON_CP_IDLE 0x04
472#define DRM_RADEON_RESET 0x05
473#define DRM_RADEON_FULLSCREEN 0x06
474#define DRM_RADEON_SWAP 0x07
475#define DRM_RADEON_CLEAR 0x08
476#define DRM_RADEON_VERTEX 0x09
477#define DRM_RADEON_INDICES 0x0A
478#define DRM_RADEON_NOT_USED
479#define DRM_RADEON_STIPPLE 0x0C
480#define DRM_RADEON_INDIRECT 0x0D
481#define DRM_RADEON_TEXTURE 0x0E
482#define DRM_RADEON_VERTEX2 0x0F
483#define DRM_RADEON_CMDBUF 0x10
484#define DRM_RADEON_GETPARAM 0x11
485#define DRM_RADEON_FLIP 0x12
486#define DRM_RADEON_ALLOC 0x13
487#define DRM_RADEON_FREE 0x14
488#define DRM_RADEON_INIT_HEAP 0x15
489#define DRM_RADEON_IRQ_EMIT 0x16
490#define DRM_RADEON_IRQ_WAIT 0x17
491#define DRM_RADEON_CP_RESUME 0x18
492#define DRM_RADEON_SETPARAM 0x19
493#define DRM_RADEON_SURF_ALLOC 0x1a
494#define DRM_RADEON_SURF_FREE 0x1b
495
496#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
497#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
498#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
499#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
500#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
501#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
502#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
503#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
504#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
505#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
506#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
507#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
508#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
509#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
510#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
511#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
512#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
513#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
514#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
515#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
516#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
517#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
518#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
519#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
520#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
521#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
522#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
523
524typedef struct drm_radeon_init {
525 enum {
526 RADEON_INIT_CP = 0x01,
527 RADEON_CLEANUP_CP = 0x02,
528 RADEON_INIT_R200_CP = 0x03,
529 RADEON_INIT_R300_CP = 0x04
530 } func;
531 unsigned long sarea_priv_offset;
532 int is_pci;
533 int cp_mode;
534 int gart_size;
535 int ring_size;
536 int usec_timeout;
537
538 unsigned int fb_bpp;
539 unsigned int front_offset, front_pitch;
540 unsigned int back_offset, back_pitch;
541 unsigned int depth_bpp;
542 unsigned int depth_offset, depth_pitch;
543
544 unsigned long fb_offset;
545 unsigned long mmio_offset;
546 unsigned long ring_offset;
547 unsigned long ring_rptr_offset;
548 unsigned long buffers_offset;
549 unsigned long gart_textures_offset;
550} drm_radeon_init_t;
551
552typedef struct drm_radeon_cp_stop {
553 int flush;
554 int idle;
555} drm_radeon_cp_stop_t;
556
557typedef struct drm_radeon_fullscreen {
558 enum {
559 RADEON_INIT_FULLSCREEN = 0x01,
560 RADEON_CLEANUP_FULLSCREEN = 0x02
561 } func;
562} drm_radeon_fullscreen_t;
563
564#define CLEAR_X1 0
565#define CLEAR_Y1 1
566#define CLEAR_X2 2
567#define CLEAR_Y2 3
568#define CLEAR_DEPTH 4
569
570typedef union drm_radeon_clear_rect {
571 float f[5];
572 unsigned int ui[5];
573} drm_radeon_clear_rect_t;
574
575typedef struct drm_radeon_clear {
576 unsigned int flags;
577 unsigned int clear_color;
578 unsigned int clear_depth;
579 unsigned int color_mask;
580 unsigned int depth_mask; /* misnamed field: should be stencil */
581 drm_radeon_clear_rect_t __user *depth_boxes;
582} drm_radeon_clear_t;
583
584typedef struct drm_radeon_vertex {
585 int prim;
586 int idx; /* Index of vertex buffer */
587 int count; /* Number of vertices in buffer */
588 int discard; /* Client finished with buffer? */
589} drm_radeon_vertex_t;
590
591typedef struct drm_radeon_indices {
592 int prim;
593 int idx;
594 int start;
595 int end;
596 int discard; /* Client finished with buffer? */
597} drm_radeon_indices_t;
598
599/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
600 * - allows multiple primitives and state changes in a single ioctl
601 * - supports driver change to emit native primitives
602 */
603typedef struct drm_radeon_vertex2 {
604 int idx; /* Index of vertex buffer */
605 int discard; /* Client finished with buffer? */
606 int nr_states;
607 drm_radeon_state_t __user *state;
608 int nr_prims;
609 drm_radeon_prim_t __user *prim;
610} drm_radeon_vertex2_t;
611
612/* v1.3 - obsoletes drm_radeon_vertex2
613 * - allows arbitrarily large cliprect list
614 * - allows updating of tcl packet, vector and scalar state
615 * - allows memory-efficient description of state updates
616 * - allows state to be emitted without a primitive
617 * (for clears, ctx switches)
618 * - allows more than one dma buffer to be referenced per ioctl
619 * - supports tcl driver
620 * - may be extended in future versions with new cmd types, packets
621 */
622typedef struct drm_radeon_cmd_buffer {
623 int bufsz;
624 char __user *buf;
625 int nbox;
626 struct drm_clip_rect __user *boxes;
627} drm_radeon_cmd_buffer_t;
628
629typedef struct drm_radeon_tex_image {
630 unsigned int x, y; /* Blit coordinates */
631 unsigned int width, height;
632 const void __user *data;
633} drm_radeon_tex_image_t;
634
635typedef struct drm_radeon_texture {
636 unsigned int offset;
637 int pitch;
638 int format;
639 int width; /* Texture image coordinates */
640 int height;
641 drm_radeon_tex_image_t __user *image;
642} drm_radeon_texture_t;
643
644typedef struct drm_radeon_stipple {
645 unsigned int __user *mask;
646} drm_radeon_stipple_t;
647
648typedef struct drm_radeon_indirect {
649 int idx;
650 int start;
651 int end;
652 int discard;
653} drm_radeon_indirect_t;
654
655/* enum for card type parameters */
656#define RADEON_CARD_PCI 0
657#define RADEON_CARD_AGP 1
658#define RADEON_CARD_PCIE 2
659
660/* 1.3: An ioctl to get parameters that aren't available to the 3d
661 * client any other way.
662 */
663#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
664#define RADEON_PARAM_LAST_FRAME 2
665#define RADEON_PARAM_LAST_DISPATCH 3
666#define RADEON_PARAM_LAST_CLEAR 4
667/* Added with DRM version 1.6. */
668#define RADEON_PARAM_IRQ_NR 5
669#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
670/* Added with DRM version 1.8. */
671#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
672#define RADEON_PARAM_STATUS_HANDLE 8
673#define RADEON_PARAM_SAREA_HANDLE 9
674#define RADEON_PARAM_GART_TEX_HANDLE 10
675#define RADEON_PARAM_SCRATCH_OFFSET 11
676#define RADEON_PARAM_CARD_TYPE 12
677#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
678#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
679#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
680
681typedef struct drm_radeon_getparam {
682 int param;
683 void __user *value;
684} drm_radeon_getparam_t;
685
686/* 1.6: Set up a memory manager for regions of shared memory:
687 */
688#define RADEON_MEM_REGION_GART 1
689#define RADEON_MEM_REGION_FB 2
690
691typedef struct drm_radeon_mem_alloc {
692 int region;
693 int alignment;
694 int size;
695 int __user *region_offset; /* offset from start of fb or GART */
696} drm_radeon_mem_alloc_t;
697
698typedef struct drm_radeon_mem_free {
699 int region;
700 int region_offset;
701} drm_radeon_mem_free_t;
702
703typedef struct drm_radeon_mem_init_heap {
704 int region;
705 int size;
706 int start;
707} drm_radeon_mem_init_heap_t;
708
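A hedged sketch of carving a buffer out of the shared GART heap with the ioctls above; the heap is assumed to have been set up already (normally once, via DRM_RADEON_INIT_HEAP), and the size and alignment values are illustrative.

#include <sys/ioctl.h>

/* Sketch: allocate and release 64 KB from the GART region. */
static int radeon_gart_alloc_example(int fd)
{
	drm_radeon_mem_alloc_t alloc;
	drm_radeon_mem_free_t free_req;
	int offset = 0;

	alloc.region = RADEON_MEM_REGION_GART;
	alloc.alignment = 4096;
	alloc.size = 64 * 1024;
	alloc.region_offset = &offset;	/* kernel writes the offset here */

	if (ioctl(fd, DRM_IOCTL_RADEON_ALLOC, &alloc) != 0)
		return -1;

	/* ... use the region at 'offset' within the GART aperture ... */

	free_req.region = RADEON_MEM_REGION_GART;
	free_req.region_offset = offset;
	return ioctl(fd, DRM_IOCTL_RADEON_FREE, &free_req);
}
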
709/* 1.6: Userspace can request & wait on irq's:
710 */
711typedef struct drm_radeon_irq_emit {
712 int __user *irq_seq;
713} drm_radeon_irq_emit_t;
714
715typedef struct drm_radeon_irq_wait {
716 int irq_seq;
717} drm_radeon_irq_wait_t;
718
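The two structures above are typically used as an emit-then-wait pair: EMIT returns a sequence number through the user pointer, and WAIT blocks until the hardware has passed it. A minimal sketch, assuming an open DRM fd:

#include <sys/ioctl.h>

/* Sketch: emit an IRQ fence and block until the GPU reaches it. */
static int radeon_fence_and_wait(int fd)
{
	drm_radeon_irq_emit_t emit;
	drm_radeon_irq_wait_t wait;
	int seq = 0;

	emit.irq_seq = &seq;	/* kernel writes the sequence number */
	if (ioctl(fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit) != 0)
		return -1;

	wait.irq_seq = seq;
	return ioctl(fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait);
}
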
719/* 1.10: Clients tell the DRM where they think the framebuffer is located in
720 * the card's address space, via a new generic ioctl to set parameters
721 */
722
723typedef struct drm_radeon_setparam {
724 unsigned int param;
725 int64_t value;
726} drm_radeon_setparam_t;
727
728#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
729#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
730#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI GART location */
731#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
732#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
733#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
734/* 1.14: Clients can allocate/free a surface
735 */
736typedef struct drm_radeon_surface_alloc {
737 unsigned int address;
738 unsigned int size;
739 unsigned int flags;
740} drm_radeon_surface_alloc_t;
741
742typedef struct drm_radeon_surface_free {
743 unsigned int address;
744} drm_radeon_surface_free_t;
745
746#define DRM_RADEON_VBLANK_CRTC1 1
747#define DRM_RADEON_VBLANK_CRTC2 2
748
749#endif
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h
deleted file mode 100644
index 8a576ef01821..000000000000
--- a/drivers/char/drm/savage_drm.h
+++ /dev/null
@@ -1,210 +0,0 @@
1/* savage_drm.h -- Public header for the savage driver
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__
28
29#ifndef __SAVAGE_SAREA_DEFINES__
30#define __SAVAGE_SAREA_DEFINES__
31
32/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
33 * regions, subject to a minimum region size of (1<<16) == 64k.
34 *
35 * Clients may subdivide regions internally, but when sharing between
36 * clients, the region size is the minimum granularity.
37 */
38
39#define SAVAGE_CARD_HEAP 0
40#define SAVAGE_AGP_HEAP 1
41#define SAVAGE_NR_TEX_HEAPS 2
42#define SAVAGE_NR_TEX_REGIONS 16
43#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
44
45#endif /* __SAVAGE_SAREA_DEFINES__ */
46
47typedef struct _drm_savage_sarea {
48 /* LRU lists for texture memory in agp space and on the card.
49 */
50 struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
51 1];
52 unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
53
54 /* Mechanism to validate card state.
55 */
56 int ctxOwner;
57} drm_savage_sarea_t, *drm_savage_sarea_ptr;
58
59/* Savage-specific ioctls
60 */
61#define DRM_SAVAGE_BCI_INIT 0x00
62#define DRM_SAVAGE_BCI_CMDBUF 0x01
63#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
64#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
65
66#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
67#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
68#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
69#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
70
71#define SAVAGE_DMA_PCI 1
72#define SAVAGE_DMA_AGP 3
73typedef struct drm_savage_init {
74 enum {
75 SAVAGE_INIT_BCI = 1,
76 SAVAGE_CLEANUP_BCI = 2
77 } func;
78 unsigned int sarea_priv_offset;
79
80 /* some parameters */
81 unsigned int cob_size;
82 unsigned int bci_threshold_lo, bci_threshold_hi;
83 unsigned int dma_type;
84
85 /* frame buffer layout */
86 unsigned int fb_bpp;
87 unsigned int front_offset, front_pitch;
88 unsigned int back_offset, back_pitch;
89 unsigned int depth_bpp;
90 unsigned int depth_offset, depth_pitch;
91
92 /* local textures */
93 unsigned int texture_offset;
94 unsigned int texture_size;
95
96 /* physical locations of non-permanent maps */
97 unsigned long status_offset;
98 unsigned long buffers_offset;
99 unsigned long agp_textures_offset;
100 unsigned long cmd_dma_offset;
101} drm_savage_init_t;
102
103typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
104typedef struct drm_savage_cmdbuf {
105 /* command buffer in client's address space */
106 drm_savage_cmd_header_t __user *cmd_addr;
107 unsigned int size; /* size of the command buffer in 64bit units */
108
109 unsigned int dma_idx; /* DMA buffer index to use */
110 int discard; /* discard DMA buffer when done */
111 /* vertex buffer in client's address space */
112 unsigned int __user *vb_addr;
113 unsigned int vb_size; /* size of client vertex buffer in bytes */
114 unsigned int vb_stride; /* stride of vertices in 32bit words */
115 /* boxes in client's address space */
116 struct drm_clip_rect __user *box_addr;
117 unsigned int nbox; /* number of clipping boxes */
118} drm_savage_cmdbuf_t;
119
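To show how the fields above fit together, a hedged sketch of a submission that draws from a client vertex buffer; building a valid command stream and vertex data is assumed to have happened elsewhere, and the stride and DMA-index values are placeholders rather than the DDX's actual choices.

#include <sys/ioctl.h>

/* Sketch: hand a prebuilt Savage command stream plus client VB to the kernel. */
static int savage_submit(int fd, drm_savage_cmd_header_t *cmds,
			 unsigned int n_cmd_qwords,
			 unsigned int *verts, unsigned int vb_bytes,
			 struct drm_clip_rect *boxes, unsigned int nbox)
{
	drm_savage_cmdbuf_t cb;

	cb.cmd_addr = cmds;
	cb.size = n_cmd_qwords;	/* in 64-bit units, per the comment above */
	cb.dma_idx = 0;		/* unused here: vertices come from vb_addr */
	cb.discard = 0;		/* nothing to discard without a DMA buffer */
	cb.vb_addr = verts;
	cb.vb_size = vb_bytes;
	cb.vb_stride = 10;	/* dwords per vertex; depends on the skip flags */
	cb.box_addr = boxes;
	cb.nbox = nbox;

	return ioctl(fd, DRM_IOCTL_SAVAGE_CMDBUF, &cb);
}
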
120#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
121#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
122#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
123typedef struct drm_savage_event {
124 unsigned int count;
125 unsigned int flags;
126} drm_savage_event_emit_t, drm_savage_event_wait_t;
127
128/* Commands for the cmdbuf ioctl
129 */
130#define SAVAGE_CMD_STATE 0 /* a range of state registers */
131#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
132#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
133#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
134#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
135#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
136#define SAVAGE_CMD_SWAP 6 /* swap buffers */
137
138/* Primitive types
139*/
140#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
141#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
142#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
143#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
144 * shading on s3d */
145
146/* Skip flags (vertex format)
147 */
148#define SAVAGE_SKIP_Z 0x01
149#define SAVAGE_SKIP_W 0x02
150#define SAVAGE_SKIP_C0 0x04
151#define SAVAGE_SKIP_C1 0x08
152#define SAVAGE_SKIP_S0 0x10
153#define SAVAGE_SKIP_T0 0x20
154#define SAVAGE_SKIP_ST0 0x30
155#define SAVAGE_SKIP_S1 0x40
156#define SAVAGE_SKIP_T1 0x80
157#define SAVAGE_SKIP_ST1 0xc0
158#define SAVAGE_SKIP_ALL_S3D 0x3f
159#define SAVAGE_SKIP_ALL_S4 0xff
160
161/* Buffer names for clear command
162 */
163#define SAVAGE_FRONT 0x1
164#define SAVAGE_BACK 0x2
165#define SAVAGE_DEPTH 0x4
166
167/* 64-bit command header
168 */
169union drm_savage_cmd_header {
170 struct {
171 unsigned char cmd; /* command */
172 unsigned char pad0;
173 unsigned short pad1;
174 unsigned short pad2;
175 unsigned short pad3;
176 } cmd; /* generic */
177 struct {
178 unsigned char cmd;
179 unsigned char global; /* need idle engine? */
180 unsigned short count; /* number of consecutive registers */
181 unsigned short start; /* first register */
182 unsigned short pad3;
183 } state; /* SAVAGE_CMD_STATE */
184 struct {
185 unsigned char cmd;
186 unsigned char prim; /* primitive type */
187 unsigned short skip; /* vertex format (skip flags) */
188 unsigned short count; /* number of vertices */
189 unsigned short start; /* first vertex in DMA/vertex buffer */
190 } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
191 struct {
192 unsigned char cmd;
193 unsigned char prim;
194 unsigned short skip;
195 unsigned short count; /* number of indices that follow */
196 unsigned short pad3;
197 } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
198 struct {
199 unsigned char cmd;
200 unsigned char pad0;
201 unsigned short pad1;
202 unsigned int flags;
203 } clear0; /* SAVAGE_CMD_CLEAR */
204 struct {
205 unsigned int mask;
206 unsigned int value;
207 } clear1; /* SAVAGE_CMD_CLEAR data */
208};
209
210#endif
diff --git a/drivers/char/drm/sis_drm.h b/drivers/char/drm/sis_drm.h
deleted file mode 100644
index 30f7b3827466..000000000000
--- a/drivers/char/drm/sis_drm.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
2/*
3 * Copyright 2005 Eric Anholt
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 */
26
27#ifndef __SIS_DRM_H__
28#define __SIS_DRM_H__
29
30/* SiS specific ioctls */
31#define NOT_USED_0_3
32#define DRM_SIS_FB_ALLOC 0x04
33#define DRM_SIS_FB_FREE 0x05
34#define NOT_USED_6_12
35#define DRM_SIS_AGP_INIT 0x13
36#define DRM_SIS_AGP_ALLOC 0x14
37#define DRM_SIS_AGP_FREE 0x15
38#define DRM_SIS_FB_INIT 0x16
39
40#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
41#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
42#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
43#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
44#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
45#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
46/*
47#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
48#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
49#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
50*/
51
52typedef struct {
53 int context;
54 unsigned int offset;
55 unsigned int size;
56 unsigned long free;
57} drm_sis_mem_t;
58
59typedef struct {
60 unsigned int offset, size;
61} drm_sis_agp_t;
62
63typedef struct {
64 unsigned int offset, size;
65} drm_sis_fb_t;
66
67#endif /* __SIS_DRM_H__ */
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
deleted file mode 100644
index a3b5c102b067..000000000000
--- a/drivers/char/drm/via_drm.h
+++ /dev/null
@@ -1,275 +0,0 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _VIA_DRM_H_
25#define _VIA_DRM_H_
26
27/* WARNING: These defines must be the same as what the Xserver uses.
28 * if you change them, you must change the defines in the Xserver.
29 */
30
31#ifndef _VIA_DEFINES_
32#define _VIA_DEFINES_
33
34#ifndef __KERNEL__
35#include "via_drmclient.h"
36#endif
37
38#define VIA_NR_SAREA_CLIPRECTS 8
39#define VIA_NR_XVMC_PORTS 10
40#define VIA_NR_XVMC_LOCKS 5
41#define VIA_MAX_CACHELINE_SIZE 64
42#define XVMCLOCKPTR(saPriv,lockNo) \
43 ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
44 (VIA_MAX_CACHELINE_SIZE - 1)) & \
45 ~(VIA_MAX_CACHELINE_SIZE - 1)) + \
46 VIA_MAX_CACHELINE_SIZE*(lockNo)))
47
48/* Each region is a minimum of 64k, and there are at most 64 of them.
49 */
50#define VIA_NR_TEX_REGIONS 64
51#define VIA_LOG_MIN_TEX_REGION_SIZE 16
52#endif
53
54#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
55#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
56#define VIA_UPLOAD_CTX 0x4
57#define VIA_UPLOAD_BUFFERS 0x8
58#define VIA_UPLOAD_TEX0 0x10
59#define VIA_UPLOAD_TEX1 0x20
60#define VIA_UPLOAD_CLIPRECTS 0x40
61#define VIA_UPLOAD_ALL 0xff
62
63/* VIA specific ioctls */
64#define DRM_VIA_ALLOCMEM 0x00
65#define DRM_VIA_FREEMEM 0x01
66#define DRM_VIA_AGP_INIT 0x02
67#define DRM_VIA_FB_INIT 0x03
68#define DRM_VIA_MAP_INIT 0x04
69#define DRM_VIA_DEC_FUTEX 0x05
70#define NOT_USED
71#define DRM_VIA_DMA_INIT 0x07
72#define DRM_VIA_CMDBUFFER 0x08
73#define DRM_VIA_FLUSH 0x09
74#define DRM_VIA_PCICMD 0x0a
75#define DRM_VIA_CMDBUF_SIZE 0x0b
76#define NOT_USED
77#define DRM_VIA_WAIT_IRQ 0x0d
78#define DRM_VIA_DMA_BLIT 0x0e
79#define DRM_VIA_BLIT_SYNC 0x0f
80
81#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
82#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
83#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
84#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
85#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
86#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
87#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
88#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
89#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
90#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
91#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
92 drm_via_cmdbuf_size_t)
93#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
94#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
95#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
96
97/* Indices into buf.Setup where various bits of state are mirrored per
98 * context and per buffer. These can be fired at the card as a unit,
99 * or in a piecewise fashion as required.
100 */
101
102#define VIA_TEX_SETUP_SIZE 8
103
104/* Flags for clear ioctl
105 */
106#define VIA_FRONT 0x1
107#define VIA_BACK 0x2
108#define VIA_DEPTH 0x4
109#define VIA_STENCIL 0x8
110#define VIA_MEM_VIDEO 0 /* matches drm constant */
111#define VIA_MEM_AGP 1 /* matches drm constant */
112#define VIA_MEM_SYSTEM 2
113#define VIA_MEM_MIXED 3
114#define VIA_MEM_UNKNOWN 4
115
116typedef struct {
117 uint32_t offset;
118 uint32_t size;
119} drm_via_agp_t;
120
121typedef struct {
122 uint32_t offset;
123 uint32_t size;
124} drm_via_fb_t;
125
126typedef struct {
127 uint32_t context;
128 uint32_t type;
129 uint32_t size;
130 unsigned long index;
131 unsigned long offset;
132} drm_via_mem_t;
133
134typedef struct _drm_via_init {
135 enum {
136 VIA_INIT_MAP = 0x01,
137 VIA_CLEANUP_MAP = 0x02
138 } func;
139
140 unsigned long sarea_priv_offset;
141 unsigned long fb_offset;
142 unsigned long mmio_offset;
143 unsigned long agpAddr;
144} drm_via_init_t;
145
146typedef struct _drm_via_futex {
147 enum {
148 VIA_FUTEX_WAIT = 0x00,
149 VIA_FUTEX_WAKE = 0X01
150 } func;
151 uint32_t ms;
152 uint32_t lock;
153 uint32_t val;
154} drm_via_futex_t;
155
156typedef struct _drm_via_dma_init {
157 enum {
158 VIA_INIT_DMA = 0x01,
159 VIA_CLEANUP_DMA = 0x02,
160 VIA_DMA_INITIALIZED = 0x03
161 } func;
162
163 unsigned long offset;
164 unsigned long size;
165 unsigned long reg_pause_addr;
166} drm_via_dma_init_t;
167
168typedef struct _drm_via_cmdbuffer {
169 char __user *buf;
170 unsigned long size;
171} drm_via_cmdbuffer_t;
172
173/* Warning: If you change the SAREA structure you must change the Xserver
174 * structure as well */
175
176typedef struct _drm_via_tex_region {
177 unsigned char next, prev; /* indices to form a circular LRU */
178 unsigned char inUse; /* owned by a client, or free? */
179 int age; /* tracked by clients to update local LRU's */
180} drm_via_tex_region_t;
181
182typedef struct _drm_via_sarea {
183 unsigned int dirty;
184 unsigned int nbox;
185 struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
186 drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
187 int texAge; /* last time texture was uploaded */
188 int ctxOwner; /* last context to upload state */
189 int vertexPrim;
190
191 /*
192 * Below is for XvMC.
193 * We want the lock integers alone on, and aligned to, a cache line.
194 * Therefore this somewhat strange construct.
195 */
196
197 char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
198
199 unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
200 unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
201 unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
202
203 /* Used by the 3d driver only at this point, for pageflipping:
204 */
205 unsigned int pfCurrentOffset;
206} drm_via_sarea_t;
207
208typedef struct _drm_via_cmdbuf_size {
209 enum {
210 VIA_CMDBUF_SPACE = 0x01,
211 VIA_CMDBUF_LAG = 0x02
212 } func;
213 int wait;
214 uint32_t size;
215} drm_via_cmdbuf_size_t;
216
217typedef enum {
218 VIA_IRQ_ABSOLUTE = 0x0,
219 VIA_IRQ_RELATIVE = 0x1,
220 VIA_IRQ_SIGNAL = 0x10000000,
221 VIA_IRQ_FORCE_SEQUENCE = 0x20000000
222} via_irq_seq_type_t;
223
224#define VIA_IRQ_FLAGS_MASK 0xF0000000
225
226enum drm_via_irqs {
227 drm_via_irq_hqv0 = 0,
228 drm_via_irq_hqv1,
229 drm_via_irq_dma0_dd,
230 drm_via_irq_dma0_td,
231 drm_via_irq_dma1_dd,
232 drm_via_irq_dma1_td,
233 drm_via_irq_num
234};
235
236struct drm_via_wait_irq_request {
237 unsigned irq;
238 via_irq_seq_type_t type;
239 uint32_t sequence;
240 uint32_t signal;
241};
242
243typedef union drm_via_irqwait {
244 struct drm_via_wait_irq_request request;
245 struct drm_wait_vblank_reply reply;
246} drm_via_irqwait_t;
247
248typedef struct drm_via_blitsync {
249 uint32_t sync_handle;
250 unsigned engine;
251} drm_via_blitsync_t;
252
253/* - * Below,"flags" is currently unused but will be used for possible future
254 * extensions like kernel space bounce buffers for bad alignments and
255 * blit engine busy-wait polling for better latency in the absence of
256 * interrupts.
257 */
258
259typedef struct drm_via_dmablit {
260 uint32_t num_lines;
261 uint32_t line_length;
262
263 uint32_t fb_addr;
264 uint32_t fb_stride;
265
266 unsigned char *mem_addr;
267 uint32_t mem_stride;
268
269 uint32_t flags;
270 int to_fb;
271
272 drm_via_blitsync_t sync;
273} drm_via_dmablit_t;
274
275#endif /* _VIA_DRM_H_ */
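Aside on the XVMCLOCKPTR() macro and the XvMCLockArea[] comment in the SAREA above: the array carries one spare cache line of slack so the macro can round its base address up to a cache-line boundary, leaving each XvMC lock alone on its own line. A minimal user-space sketch of the same arithmetic (the names below are illustrative, not part of the header):

#include <stdint.h>

#define CACHELINE	64	/* mirrors VIA_MAX_CACHELINE_SIZE */

/* Equivalent of XVMCLOCKPTR(): align the start of lock_area up to the
 * next cache-line boundary, then index in whole cache lines, so lock 0,
 * lock 1, ... never share (or false-share) a line. */
static inline void *xvmc_lock_slot(char *lock_area, unsigned int lock_no)
{
	uintptr_t base = ((uintptr_t)lock_area + (CACHELINE - 1)) &
			 ~(uintptr_t)(CACHELINE - 1);

	return (void *)(base + (uintptr_t)CACHELINE * lock_no);
}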
diff --git a/drivers/char/ds1286.c b/drivers/char/ds1286.c
index ea35ab2c9909..fb584938c9c3 100644
--- a/drivers/char/ds1286.c
+++ b/drivers/char/ds1286.c
@@ -27,6 +27,7 @@
27 * option) any later version. 27 * option) any later version.
28 */ 28 */
29#include <linux/ds1286.h> 29#include <linux/ds1286.h>
30#include <linux/smp_lock.h>
30#include <linux/types.h> 31#include <linux/types.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/miscdevice.h> 33#include <linux/miscdevice.h>
@@ -252,6 +253,7 @@ static int ds1286_ioctl(struct inode *inode, struct file *file,
252 253
253static int ds1286_open(struct inode *inode, struct file *file) 254static int ds1286_open(struct inode *inode, struct file *file)
254{ 255{
256 lock_kernel();
255 spin_lock_irq(&ds1286_lock); 257 spin_lock_irq(&ds1286_lock);
256 258
257 if (ds1286_status & RTC_IS_OPEN) 259 if (ds1286_status & RTC_IS_OPEN)
@@ -260,10 +262,12 @@ static int ds1286_open(struct inode *inode, struct file *file)
260 ds1286_status |= RTC_IS_OPEN; 262 ds1286_status |= RTC_IS_OPEN;
261 263
262 spin_unlock_irq(&ds1286_lock); 264 spin_unlock_irq(&ds1286_lock);
265 unlock_kernel();
263 return 0; 266 return 0;
264 267
265out_busy: 268out_busy:
266 spin_lock_irq(&ds1286_lock); 269 spin_lock_irq(&ds1286_lock);
270 unlock_kernel();
267 return -EBUSY; 271 return -EBUSY;
268} 272}
269 273
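Editorial note on the pattern repeated in the char-driver hunks above and below: with the VFS no longer taking the Big Kernel Lock around open(), each driver now takes lock_kernel()/unlock_kernel() (or cycle_kernel_lock()) itself, typically converting early returns into a single exit path. A minimal sketch of that shape for a hypothetical driver (none of these names are from the patch):

#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_status;
#define EXAMPLE_IS_OPEN	0x01

/* Same shape as the ds1286/rtc/hpet hunks: BKL first, the driver's own
 * lock inside it, and every error path funnelled through one unlock. */
static int example_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	lock_kernel();
	spin_lock_irq(&example_lock);

	if (example_status & EXAMPLE_IS_OPEN) {
		ret = -EBUSY;
		goto out;
	}
	example_status |= EXAMPLE_IS_OPEN;
out:
	spin_unlock_irq(&example_lock);
	unlock_kernel();
	return ret;
}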
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 334ad5bbe6b6..34275c6f1da2 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -8,6 +8,7 @@
8#include <linux/proc_fs.h> 8#include <linux/proc_fs.h>
9#include <linux/capability.h> 9#include <linux/capability.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/smp_lock.h>
11 12
12#include <asm/hardware.h> 13#include <asm/hardware.h>
13#include <asm/mach-types.h> 14#include <asm/mach-types.h>
@@ -208,6 +209,12 @@ static void ds1620_read_state(struct therm *therm)
208 therm->hi = cvt_9_to_int(ds1620_in(THERM_READ_TH, 9)); 209 therm->hi = cvt_9_to_int(ds1620_in(THERM_READ_TH, 9));
209} 210}
210 211
212static int ds1620_open(struct inode *inode, struct file *file)
213{
214 cycle_kernel_lock();
215 return nonseekable_open(inode, file);
216}
217
211static ssize_t 218static ssize_t
212ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr) 219ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
213{ 220{
@@ -336,7 +343,7 @@ static struct proc_dir_entry *proc_therm_ds1620;
336 343
337static const struct file_operations ds1620_fops = { 344static const struct file_operations ds1620_fops = {
338 .owner = THIS_MODULE, 345 .owner = THIS_MODULE,
339 .open = nonseekable_open, 346 .open = ds1620_open,
340 .read = ds1620_read, 347 .read = ds1620_read,
341 .ioctl = ds1620_ioctl, 348 .ioctl = ds1620_ioctl,
342}; 349};
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index a69c65283260..7bf7485377e6 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -33,6 +33,7 @@
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/smp_lock.h>
36 37
37#include <asm/atarihw.h> 38#include <asm/atarihw.h>
38#include <asm/traps.h> 39#include <asm/traps.h>
@@ -436,13 +437,17 @@ static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
436static int dsp56k_open(struct inode *inode, struct file *file) 437static int dsp56k_open(struct inode *inode, struct file *file)
437{ 438{
438 int dev = iminor(inode) & 0x0f; 439 int dev = iminor(inode) & 0x0f;
440 int ret = 0;
439 441
442 lock_kernel();
440 switch(dev) 443 switch(dev)
441 { 444 {
442 case DSP56K_DEV_56001: 445 case DSP56K_DEV_56001:
443 446
444 if (test_and_set_bit(0, &dsp56k.in_use)) 447 if (test_and_set_bit(0, &dsp56k.in_use)) {
445 return -EBUSY; 448 ret = -EBUSY;
449 goto out;
450 }
446 451
447 dsp56k.timeout = TIMEOUT; 452 dsp56k.timeout = TIMEOUT;
448 dsp56k.maxio = MAXIO; 453 dsp56k.maxio = MAXIO;
@@ -458,10 +463,11 @@ static int dsp56k_open(struct inode *inode, struct file *file)
458 break; 463 break;
459 464
460 default: 465 default:
461 return -ENODEV; 466 ret = -ENODEV;
462 } 467 }
463 468out:
464 return 0; 469 unlock_kernel();
470 return ret;
465} 471}
466 472
467static int dsp56k_release(struct inode *inode, struct file *file) 473static int dsp56k_release(struct inode *inode, struct file *file)
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index abde6ddefe69..6b900b297cc6 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -56,6 +56,7 @@
56#include <linux/errno.h> /* for -EBUSY */ 56#include <linux/errno.h> /* for -EBUSY */
57#include <linux/ioport.h> /* for request_region */ 57#include <linux/ioport.h> /* for request_region */
58#include <linux/delay.h> /* for loops_per_jiffy */ 58#include <linux/delay.h> /* for loops_per_jiffy */
59#include <linux/smp_lock.h> /* cycle_kernel_lock() */
59#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ 60#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */
60#include <asm/uaccess.h> /* for get_user, etc. */ 61#include <asm/uaccess.h> /* for get_user, etc. */
61#include <linux/wait.h> /* for wait_queue */ 62#include <linux/wait.h> /* for wait_queue */
@@ -288,10 +289,12 @@ static int dtlk_ioctl(struct inode *inode,
288 } 289 }
289} 290}
290 291
292/* Note that nobody ever sets dtlk_busy... */
291static int dtlk_open(struct inode *inode, struct file *file) 293static int dtlk_open(struct inode *inode, struct file *file)
292{ 294{
293 TRACE_TEXT("(dtlk_open"); 295 TRACE_TEXT("(dtlk_open");
294 296
297 cycle_kernel_lock();
295 nonseekable_open(inode, file); 298 nonseekable_open(inode, file);
296 switch (iminor(inode)) { 299 switch (iminor(inode)) {
297 case DTLK_MINOR: 300 case DTLK_MINOR:
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 49233f589874..d57ca3e4e534 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -28,6 +28,7 @@
28 */ 28 */
29 29
30 30
31#include <linux/smp_lock.h>
31#include <linux/types.h> 32#include <linux/types.h>
32#include <linux/errno.h> 33#include <linux/errno.h>
33#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
@@ -272,6 +273,7 @@ efi_rtc_open(struct inode *inode, struct file *file)
272 * We do accept multiple open files at the same time as we 273 * We do accept multiple open files at the same time as we
273 * synchronize on the per call operation. 274 * synchronize on the per call operation.
274 */ 275 */
276 cycle_kernel_lock();
275 return 0; 277 return 0;
276} 278}
277 279
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 69f0a2993af0..aac0985a572b 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -51,6 +51,7 @@
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/poll.h> 52#include <linux/poll.h>
53#include <linux/proc_fs.h> 53#include <linux/proc_fs.h>
54#include <linux/smp_lock.h>
54#include <linux/workqueue.h> 55#include <linux/workqueue.h>
55 56
56#include <asm/uaccess.h> 57#include <asm/uaccess.h>
@@ -338,12 +339,16 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
338 339
339static int gen_rtc_open(struct inode *inode, struct file *file) 340static int gen_rtc_open(struct inode *inode, struct file *file)
340{ 341{
341 if (gen_rtc_status & RTC_IS_OPEN) 342 lock_kernel();
343 if (gen_rtc_status & RTC_IS_OPEN) {
344 unlock_kernel();
342 return -EBUSY; 345 return -EBUSY;
346 }
343 347
344 gen_rtc_status |= RTC_IS_OPEN; 348 gen_rtc_status |= RTC_IS_OPEN;
345 gen_rtc_irq_data = 0; 349 gen_rtc_irq_data = 0;
346 irq_active = 0; 350 irq_active = 0;
351 unlock_kernel();
347 352
348 return 0; 353 return 0;
349} 354}
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e7fb0bca3667..fb0a85a1eb36 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -14,6 +14,7 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/smp_lock.h>
17#include <linux/types.h> 18#include <linux/types.h>
18#include <linux/miscdevice.h> 19#include <linux/miscdevice.h>
19#include <linux/major.h> 20#include <linux/major.h>
@@ -193,6 +194,7 @@ static int hpet_open(struct inode *inode, struct file *file)
193 if (file->f_mode & FMODE_WRITE) 194 if (file->f_mode & FMODE_WRITE)
194 return -EINVAL; 195 return -EINVAL;
195 196
197 lock_kernel();
196 spin_lock_irq(&hpet_lock); 198 spin_lock_irq(&hpet_lock);
197 199
198 for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) 200 for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
@@ -207,6 +209,7 @@ static int hpet_open(struct inode *inode, struct file *file)
207 209
208 if (!devp) { 210 if (!devp) {
209 spin_unlock_irq(&hpet_lock); 211 spin_unlock_irq(&hpet_lock);
212 unlock_kernel();
210 return -EBUSY; 213 return -EBUSY;
211 } 214 }
212 215
@@ -214,6 +217,7 @@ static int hpet_open(struct inode *inode, struct file *file)
214 devp->hd_irqdata = 0; 217 devp->hd_irqdata = 0;
215 devp->hd_flags |= HPET_OPEN; 218 devp->hd_flags |= HPET_OPEN;
216 spin_unlock_irq(&hpet_lock); 219 spin_unlock_irq(&hpet_lock);
220 unlock_kernel();
217 221
218 return 0; 222 return 0;
219} 223}
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index dd68f8541c2d..db2ae4216279 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -39,9 +39,14 @@ static int xencons_irq;
39 39
40/* ------------------------------------------------------------------ */ 40/* ------------------------------------------------------------------ */
41 41
42static unsigned long console_pfn = ~0ul;
43
42static inline struct xencons_interface *xencons_interface(void) 44static inline struct xencons_interface *xencons_interface(void)
43{ 45{
44 return mfn_to_virt(xen_start_info->console.domU.mfn); 46 if (console_pfn == ~0ul)
47 return mfn_to_virt(xen_start_info->console.domU.mfn);
48 else
49 return __va(console_pfn << PAGE_SHIFT);
45} 50}
46 51
47static inline void notify_daemon(void) 52static inline void notify_daemon(void)
@@ -101,20 +106,32 @@ static int __init xen_init(void)
101{ 106{
102 struct hvc_struct *hp; 107 struct hvc_struct *hp;
103 108
104 if (!is_running_on_xen()) 109 if (!is_running_on_xen() ||
105 return 0; 110 is_initial_xendomain() ||
111 !xen_start_info->console.domU.evtchn)
112 return -ENODEV;
106 113
107 xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn); 114 xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
108 if (xencons_irq < 0) 115 if (xencons_irq < 0)
109 xencons_irq = 0 /* NO_IRQ */; 116 xencons_irq = 0; /* NO_IRQ */
117
110 hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256); 118 hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256);
111 if (IS_ERR(hp)) 119 if (IS_ERR(hp))
112 return PTR_ERR(hp); 120 return PTR_ERR(hp);
113 121
114 hvc = hp; 122 hvc = hp;
123
124 console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);
125
115 return 0; 126 return 0;
116} 127}
117 128
129void xen_console_resume(void)
130{
131 if (xencons_irq)
132 rebind_evtchn_irq(xen_start_info->console.domU.evtchn, xencons_irq);
133}
134
118static void __exit xen_fini(void) 135static void __exit xen_fini(void)
119{ 136{
120 if (hvc) 137 if (hvc)
@@ -134,12 +151,28 @@ module_init(xen_init);
134module_exit(xen_fini); 151module_exit(xen_fini);
135console_initcall(xen_cons_init); 152console_initcall(xen_cons_init);
136 153
154static void raw_console_write(const char *str, int len)
155{
156 while(len > 0) {
157 int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
158 if (rc <= 0)
159 break;
160
161 str += rc;
162 len -= rc;
163 }
164}
165
166#ifdef CONFIG_EARLY_PRINTK
137static void xenboot_write_console(struct console *console, const char *string, 167static void xenboot_write_console(struct console *console, const char *string,
138 unsigned len) 168 unsigned len)
139{ 169{
140 unsigned int linelen, off = 0; 170 unsigned int linelen, off = 0;
141 const char *pos; 171 const char *pos;
142 172
173 raw_console_write(string, len);
174
175 write_console(0, "(early) ", 8);
143 while (off < len && NULL != (pos = strchr(string+off, '\n'))) { 176 while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
144 linelen = pos-string+off; 177 linelen = pos-string+off;
145 if (off + linelen > len) 178 if (off + linelen > len)
@@ -155,5 +188,23 @@ static void xenboot_write_console(struct console *console, const char *string,
155struct console xenboot_console = { 188struct console xenboot_console = {
156 .name = "xenboot", 189 .name = "xenboot",
157 .write = xenboot_write_console, 190 .write = xenboot_write_console,
158 .flags = CON_PRINTBUFFER | CON_BOOT, 191 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
159}; 192};
193#endif /* CONFIG_EARLY_PRINTK */
194
195void xen_raw_console_write(const char *str)
196{
197 raw_console_write(str, strlen(str));
198}
199
200void xen_raw_printk(const char *fmt, ...)
201{
202 static char buf[512];
203 va_list ap;
204
205 va_start(ap, fmt);
206 vsnprintf(buf, sizeof(buf), fmt, ap);
207 va_end(ap);
208
209 xen_raw_console_write(buf);
210}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 662d60e44e9a..e5d583c84e4f 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -37,6 +37,7 @@
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/smp_lock.h>
40#include <linux/init.h> 41#include <linux/init.h>
41#include <linux/miscdevice.h> 42#include <linux/miscdevice.h>
42#include <linux/delay.h> 43#include <linux/delay.h>
@@ -86,6 +87,7 @@ static int rng_dev_open(struct inode *inode, struct file *filp)
86 return -EINVAL; 87 return -EINVAL;
87 if (filp->f_mode & FMODE_WRITE) 88 if (filp->f_mode & FMODE_WRITE)
88 return -EINVAL; 89 return -EINVAL;
90 cycle_kernel_lock();
89 return 0; 91 return 0;
90} 92}
91 93
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index c12cf8fc4be0..61b6fe4156bb 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -98,6 +98,7 @@
98#include <linux/major.h> 98#include <linux/major.h>
99#include <linux/wait.h> 99#include <linux/wait.h>
100#include <linux/device.h> 100#include <linux/device.h>
101#include <linux/smp_lock.h>
101 102
102#include <linux/tty.h> 103#include <linux/tty.h>
103#include <linux/tty_flip.h> 104#include <linux/tty_flip.h>
@@ -2908,42 +2909,11 @@ ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg )
2908static int 2909static int
2909ip2_ipl_open( struct inode *pInode, struct file *pFile ) 2910ip2_ipl_open( struct inode *pInode, struct file *pFile )
2910{ 2911{
2911 unsigned int iplminor = iminor(pInode);
2912 i2eBordStrPtr pB;
2913 i2ChanStrPtr pCh;
2914 2912
2915#ifdef IP2DEBUG_IPL 2913#ifdef IP2DEBUG_IPL
2916 printk (KERN_DEBUG "IP2IPL: open\n" ); 2914 printk (KERN_DEBUG "IP2IPL: open\n" );
2917#endif 2915#endif
2918 2916 cycle_kernel_lock();
2919 switch(iplminor) {
2920 // These are the IPL devices
2921 case 0:
2922 case 4:
2923 case 8:
2924 case 12:
2925 break;
2926
2927 // These are the status devices
2928 case 1:
2929 case 5:
2930 case 9:
2931 case 13:
2932 break;
2933
2934 // These are the debug devices
2935 case 2:
2936 case 6:
2937 case 10:
2938 case 14:
2939 pB = i2BoardPtrTable[iplminor / 4];
2940 pCh = (i2ChanStrPtr) pB->i2eChannelPtr;
2941 break;
2942
2943 // This is the trace device
2944 case 3:
2945 break;
2946 }
2947 return 0; 2917 return 0;
2948} 2918}
2949 2919
diff --git a/drivers/char/ip27-rtc.c b/drivers/char/ip27-rtc.c
index 86e6538a77b0..ec9d0443d92c 100644
--- a/drivers/char/ip27-rtc.c
+++ b/drivers/char/ip27-rtc.c
@@ -27,6 +27,7 @@
27#include <linux/bcd.h> 27#include <linux/bcd.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/smp_lock.h>
30#include <linux/types.h> 31#include <linux/types.h>
31#include <linux/miscdevice.h> 32#include <linux/miscdevice.h>
32#include <linux/ioport.h> 33#include <linux/ioport.h>
@@ -163,15 +164,18 @@ static long rtc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
163 164
164static int rtc_open(struct inode *inode, struct file *file) 165static int rtc_open(struct inode *inode, struct file *file)
165{ 166{
167 lock_kernel();
166 spin_lock_irq(&rtc_lock); 168 spin_lock_irq(&rtc_lock);
167 169
168 if (rtc_status & RTC_IS_OPEN) { 170 if (rtc_status & RTC_IS_OPEN) {
169 spin_unlock_irq(&rtc_lock); 171 spin_unlock_irq(&rtc_lock);
172 unlock_kernel();
170 return -EBUSY; 173 return -EBUSY;
171 } 174 }
172 175
173 rtc_status |= RTC_IS_OPEN; 176 rtc_status |= RTC_IS_OPEN;
174 spin_unlock_irq(&rtc_lock); 177 spin_unlock_irq(&rtc_lock);
178 unlock_kernel();
175 179
176 return 0; 180 return 0;
177} 181}
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 0246a2b8ce48..c11a40483459 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -43,6 +43,7 @@
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/device.h> 44#include <linux/device.h>
45#include <linux/compat.h> 45#include <linux/compat.h>
46#include <linux/smp_lock.h>
46 47
47struct ipmi_file_private 48struct ipmi_file_private
48{ 49{
@@ -100,7 +101,9 @@ static int ipmi_fasync(int fd, struct file *file, int on)
100 struct ipmi_file_private *priv = file->private_data; 101 struct ipmi_file_private *priv = file->private_data;
101 int result; 102 int result;
102 103
104 lock_kernel(); /* could race against open() otherwise */
103 result = fasync_helper(fd, file, on, &priv->fasync_queue); 105 result = fasync_helper(fd, file, on, &priv->fasync_queue);
106 unlock_kernel();
104 107
105 return (result); 108 return (result);
106} 109}
@@ -121,6 +124,7 @@ static int ipmi_open(struct inode *inode, struct file *file)
121 if (!priv) 124 if (!priv)
122 return -ENOMEM; 125 return -ENOMEM;
123 126
127 lock_kernel();
124 priv->file = file; 128 priv->file = file;
125 129
126 rv = ipmi_create_user(if_num, 130 rv = ipmi_create_user(if_num,
@@ -129,7 +133,7 @@ static int ipmi_open(struct inode *inode, struct file *file)
129 &(priv->user)); 133 &(priv->user));
130 if (rv) { 134 if (rv) {
131 kfree(priv); 135 kfree(priv);
132 return rv; 136 goto out;
133 } 137 }
134 138
135 file->private_data = priv; 139 file->private_data = priv;
@@ -144,7 +148,9 @@ static int ipmi_open(struct inode *inode, struct file *file)
144 priv->default_retries = -1; 148 priv->default_retries = -1;
145 priv->default_retry_time_ms = 0; 149 priv->default_retry_time_ms = 0;
146 150
147 return 0; 151out:
152 unlock_kernel();
153 return rv;
148} 154}
149 155
150static int ipmi_release(struct inode *inode, struct file *file) 156static int ipmi_release(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 0e6df289cb46..235fab0bdf79 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -35,6 +35,7 @@
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/ipmi.h> 36#include <linux/ipmi.h>
37#include <linux/ipmi_smi.h> 37#include <linux/ipmi_smi.h>
38#include <linux/smp_lock.h>
38#include <linux/watchdog.h> 39#include <linux/watchdog.h>
39#include <linux/miscdevice.h> 40#include <linux/miscdevice.h>
40#include <linux/init.h> 41#include <linux/init.h>
@@ -818,6 +819,8 @@ static int ipmi_open(struct inode *ino, struct file *filep)
818 if (test_and_set_bit(0, &ipmi_wdog_open)) 819 if (test_and_set_bit(0, &ipmi_wdog_open))
819 return -EBUSY; 820 return -EBUSY;
820 821
822 cycle_kernel_lock();
823
821 /* 824 /*
822 * Don't start the timer now, let it start on the 825 * Don't start the timer now, let it start on the
823 * first heartbeat. 826 * first heartbeat.
diff --git a/drivers/char/lcd.c b/drivers/char/lcd.c
index 4fe9206f84de..1c29b20e4f4c 100644
--- a/drivers/char/lcd.c
+++ b/drivers/char/lcd.c
@@ -20,6 +20,7 @@
20#include <linux/mc146818rtc.h> 20#include <linux/mc146818rtc.h>
21#include <linux/netdevice.h> 21#include <linux/netdevice.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/smp_lock.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24 25
25#include <asm/io.h> 26#include <asm/io.h>
@@ -414,6 +415,8 @@ static int lcd_ioctl(struct inode *inode, struct file *file,
414 415
415static int lcd_open(struct inode *inode, struct file *file) 416static int lcd_open(struct inode *inode, struct file *file)
416{ 417{
418 cycle_kernel_lock();
419
417 if (!lcd_present) 420 if (!lcd_present)
418 return -ENXIO; 421 return -ENXIO;
419 else 422 else
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 60ac642752be..71abb4c33aa2 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -126,6 +126,7 @@
126#include <linux/device.h> 126#include <linux/device.h>
127#include <linux/wait.h> 127#include <linux/wait.h>
128#include <linux/jiffies.h> 128#include <linux/jiffies.h>
129#include <linux/smp_lock.h>
129 130
130#include <linux/parport.h> 131#include <linux/parport.h>
131#undef LP_STATS 132#undef LP_STATS
@@ -489,14 +490,21 @@ static ssize_t lp_read(struct file * file, char __user * buf,
489static int lp_open(struct inode * inode, struct file * file) 490static int lp_open(struct inode * inode, struct file * file)
490{ 491{
491 unsigned int minor = iminor(inode); 492 unsigned int minor = iminor(inode);
493 int ret = 0;
492 494
493 if (minor >= LP_NO) 495 lock_kernel();
494 return -ENXIO; 496 if (minor >= LP_NO) {
495 if ((LP_F(minor) & LP_EXIST) == 0) 497 ret = -ENXIO;
496 return -ENXIO; 498 goto out;
497 if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) 499 }
498 return -EBUSY; 500 if ((LP_F(minor) & LP_EXIST) == 0) {
499 501 ret = -ENXIO;
502 goto out;
503 }
504 if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) {
505 ret = -EBUSY;
506 goto out;
507 }
500 /* If ABORTOPEN is set and the printer is offline or out of paper, 508 /* If ABORTOPEN is set and the printer is offline or out of paper,
501 we may still want to open it to perform ioctl()s. Therefore we 509 we may still want to open it to perform ioctl()s. Therefore we
502 have commandeered O_NONBLOCK, even though it is being used in 510 have commandeered O_NONBLOCK, even though it is being used in
@@ -510,21 +518,25 @@ static int lp_open(struct inode * inode, struct file * file)
510 if (status & LP_POUTPA) { 518 if (status & LP_POUTPA) {
511 printk(KERN_INFO "lp%d out of paper\n", minor); 519 printk(KERN_INFO "lp%d out of paper\n", minor);
512 LP_F(minor) &= ~LP_BUSY; 520 LP_F(minor) &= ~LP_BUSY;
513 return -ENOSPC; 521 ret = -ENOSPC;
522 goto out;
514 } else if (!(status & LP_PSELECD)) { 523 } else if (!(status & LP_PSELECD)) {
515 printk(KERN_INFO "lp%d off-line\n", minor); 524 printk(KERN_INFO "lp%d off-line\n", minor);
516 LP_F(minor) &= ~LP_BUSY; 525 LP_F(minor) &= ~LP_BUSY;
517 return -EIO; 526 ret = -EIO;
527 goto out;
518 } else if (!(status & LP_PERRORP)) { 528 } else if (!(status & LP_PERRORP)) {
519 printk(KERN_ERR "lp%d printer error\n", minor); 529 printk(KERN_ERR "lp%d printer error\n", minor);
520 LP_F(minor) &= ~LP_BUSY; 530 LP_F(minor) &= ~LP_BUSY;
521 return -EIO; 531 ret = -EIO;
532 goto out;
522 } 533 }
523 } 534 }
524 lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL); 535 lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL);
525 if (!lp_table[minor].lp_buffer) { 536 if (!lp_table[minor].lp_buffer) {
526 LP_F(minor) &= ~LP_BUSY; 537 LP_F(minor) &= ~LP_BUSY;
527 return -ENOMEM; 538 ret = -ENOMEM;
539 goto out;
528 } 540 }
529 /* Determine if the peripheral supports ECP mode */ 541 /* Determine if the peripheral supports ECP mode */
530 lp_claim_parport_or_block (&lp_table[minor]); 542 lp_claim_parport_or_block (&lp_table[minor]);
@@ -540,7 +552,9 @@ static int lp_open(struct inode * inode, struct file * file)
540 parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); 552 parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
541 lp_release_parport (&lp_table[minor]); 553 lp_release_parport (&lp_table[minor]);
542 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; 554 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
543 return 0; 555out:
556 unlock_kernel();
557 return ret;
544} 558}
545 559
546static int lp_release(struct inode * inode, struct file * file) 560static int lp_release(struct inode * inode, struct file * file)
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index f4716ad7348a..acd8e9ed474a 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -24,6 +24,7 @@
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/uio.h> 25#include <linux/uio.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/smp_lock.h>
27#include <asm/io.h> 28#include <asm/io.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/system.h> 30#include <asm/system.h>
@@ -382,15 +383,19 @@ static int mbcs_open(struct inode *ip, struct file *fp)
382 struct mbcs_soft *soft; 383 struct mbcs_soft *soft;
383 int minor; 384 int minor;
384 385
386 lock_kernel();
385 minor = iminor(ip); 387 minor = iminor(ip);
386 388
389 /* Nothing protects access to this list... */
387 list_for_each_entry(soft, &soft_list, list) { 390 list_for_each_entry(soft, &soft_list, list) {
388 if (soft->nasid == minor) { 391 if (soft->nasid == minor) {
389 fp->private_data = soft->cxdev; 392 fp->private_data = soft->cxdev;
393 unlock_kernel();
390 return 0; 394 return 0;
391 } 395 }
392 } 396 }
393 397
398 unlock_kernel();
394 return -ENODEV; 399 return -ENODEV;
395} 400}
396 401
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 934ffafedaea..070e22e8ea9e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -26,6 +26,7 @@
26#include <linux/bootmem.h> 26#include <linux/bootmem.h>
27#include <linux/splice.h> 27#include <linux/splice.h>
28#include <linux/pfn.h> 28#include <linux/pfn.h>
29#include <linux/smp_lock.h>
29 30
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/io.h> 32#include <asm/io.h>
@@ -889,6 +890,9 @@ static const struct file_operations kmsg_fops = {
889 890
890static int memory_open(struct inode * inode, struct file * filp) 891static int memory_open(struct inode * inode, struct file * filp)
891{ 892{
893 int ret = 0;
894
895 lock_kernel();
892 switch (iminor(inode)) { 896 switch (iminor(inode)) {
893 case 1: 897 case 1:
894 filp->f_op = &mem_fops; 898 filp->f_op = &mem_fops;
@@ -932,11 +936,13 @@ static int memory_open(struct inode * inode, struct file * filp)
932 break; 936 break;
933#endif 937#endif
934 default: 938 default:
939 unlock_kernel();
935 return -ENXIO; 940 return -ENXIO;
936 } 941 }
937 if (filp->f_op && filp->f_op->open) 942 if (filp->f_op && filp->f_op->open)
938 return filp->f_op->open(inode,filp); 943 ret = filp->f_op->open(inode,filp);
939 return 0; 944 unlock_kernel();
945 return ret;
940} 946}
941 947
942static const struct file_operations memory_fops = { 948static const struct file_operations memory_fops = {
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index eaace0db0ff4..6e1563c3d30a 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -49,6 +49,7 @@
49#include <linux/device.h> 49#include <linux/device.h>
50#include <linux/tty.h> 50#include <linux/tty.h>
51#include <linux/kmod.h> 51#include <linux/kmod.h>
52#include <linux/smp_lock.h>
52 53
53/* 54/*
54 * Head entry for the doubly linked miscdevice list 55 * Head entry for the doubly linked miscdevice list
@@ -118,6 +119,7 @@ static int misc_open(struct inode * inode, struct file * file)
118 int err = -ENODEV; 119 int err = -ENODEV;
119 const struct file_operations *old_fops, *new_fops = NULL; 120 const struct file_operations *old_fops, *new_fops = NULL;
120 121
122 lock_kernel();
121 mutex_lock(&misc_mtx); 123 mutex_lock(&misc_mtx);
122 124
123 list_for_each_entry(c, &misc_list, list) { 125 list_for_each_entry(c, &misc_list, list) {
@@ -155,6 +157,7 @@ static int misc_open(struct inode * inode, struct file * file)
155 fops_put(old_fops); 157 fops_put(old_fops);
156fail: 158fail:
157 mutex_unlock(&misc_mtx); 159 mutex_unlock(&misc_mtx);
160 unlock_kernel();
158 return err; 161 return err;
159} 162}
160 163
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 8d14823b0514..50243fcd87e8 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -56,6 +56,7 @@
56#include <linux/serial.h> 56#include <linux/serial.h>
57#include <linux/sched.h> 57#include <linux/sched.h>
58#include <linux/spinlock.h> 58#include <linux/spinlock.h>
59#include <linux/smp_lock.h>
59#include <linux/delay.h> 60#include <linux/delay.h>
60#include <linux/serial_8250.h> 61#include <linux/serial_8250.h>
61#include "smapi.h" 62#include "smapi.h"
@@ -100,6 +101,7 @@ static int mwave_open(struct inode *inode, struct file *file)
100 PRINTK_2(TRACE_MWAVE, 101 PRINTK_2(TRACE_MWAVE,
101 "mwavedd::mwave_open, exit return retval %x\n", retval); 102 "mwavedd::mwave_open, exit return retval %x\n", retval);
102 103
104 cycle_kernel_lock();
103 return retval; 105 return retval;
104} 106}
105 107
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 98dec380af49..197cd7a0c332 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -107,6 +107,7 @@
107#include <linux/init.h> 107#include <linux/init.h>
108#include <linux/proc_fs.h> 108#include <linux/proc_fs.h>
109#include <linux/spinlock.h> 109#include <linux/spinlock.h>
110#include <linux/smp_lock.h>
110 111
111#include <asm/io.h> 112#include <asm/io.h>
112#include <asm/uaccess.h> 113#include <asm/uaccess.h>
@@ -333,12 +334,14 @@ nvram_ioctl(struct inode *inode, struct file *file,
333static int 334static int
334nvram_open(struct inode *inode, struct file *file) 335nvram_open(struct inode *inode, struct file *file)
335{ 336{
337 lock_kernel();
336 spin_lock(&nvram_state_lock); 338 spin_lock(&nvram_state_lock);
337 339
338 if ((nvram_open_cnt && (file->f_flags & O_EXCL)) || 340 if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
339 (nvram_open_mode & NVRAM_EXCL) || 341 (nvram_open_mode & NVRAM_EXCL) ||
340 ((file->f_mode & 2) && (nvram_open_mode & NVRAM_WRITE))) { 342 ((file->f_mode & 2) && (nvram_open_mode & NVRAM_WRITE))) {
341 spin_unlock(&nvram_state_lock); 343 spin_unlock(&nvram_state_lock);
344 unlock_kernel();
342 return -EBUSY; 345 return -EBUSY;
343 } 346 }
344 347
@@ -349,6 +352,7 @@ nvram_open(struct inode *inode, struct file *file)
349 nvram_open_cnt++; 352 nvram_open_cnt++;
350 353
351 spin_unlock(&nvram_state_lock); 354 spin_unlock(&nvram_state_lock);
355 unlock_kernel();
352 356
353 return 0; 357 return 0;
354} 358}
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index ecfaf180e5bd..b930de50407a 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -20,6 +20,7 @@
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/nsc_gpio.h> 21#include <linux/nsc_gpio.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/smp_lock.h>
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24 25
25#define DEVNAME "pc8736x_gpio" 26#define DEVNAME "pc8736x_gpio"
@@ -217,6 +218,7 @@ static int pc8736x_gpio_open(struct inode *inode, struct file *file)
217 unsigned m = iminor(inode); 218 unsigned m = iminor(inode);
218 file->private_data = &pc8736x_gpio_ops; 219 file->private_data = &pc8736x_gpio_ops;
219 220
221 cycle_kernel_lock();
220 dev_dbg(&pdev->dev, "open %d\n", m); 222 dev_dbg(&pdev->dev, "open %d\n", m);
221 223
222 if (m >= PC8736X_GPIO_CT) 224 if (m >= PC8736X_GPIO_CT)
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 4a933d413423..59ca35156d81 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -32,8 +32,9 @@
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/bitrev.h> 34#include <linux/bitrev.h>
35#include <asm/uaccess.h> 35#include <linux/smp_lock.h>
36#include <asm/io.h> 36#include <linux/uaccess.h>
37#include <linux/io.h>
37 38
38#include <pcmcia/cs_types.h> 39#include <pcmcia/cs_types.h>
39#include <pcmcia/cs.h> 40#include <pcmcia/cs.h>
@@ -1405,11 +1406,11 @@ static void stop_monitor(struct cm4000_dev *dev)
1405 DEBUGP(3, dev, "<- stop_monitor\n"); 1406 DEBUGP(3, dev, "<- stop_monitor\n");
1406} 1407}
1407 1408
1408static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 1409static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1409 unsigned long arg)
1410{ 1410{
1411 struct cm4000_dev *dev = filp->private_data; 1411 struct cm4000_dev *dev = filp->private_data;
1412 unsigned int iobase = dev->p_dev->io.BasePort1; 1412 unsigned int iobase = dev->p_dev->io.BasePort1;
1413 struct inode *inode = filp->f_path.dentry->d_inode;
1413 struct pcmcia_device *link; 1414 struct pcmcia_device *link;
1414 int size; 1415 int size;
1415 int rc; 1416 int rc;
@@ -1426,38 +1427,42 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1426 DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), 1427 DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode),
1427 iminor(inode), ioctl_names[_IOC_NR(cmd)]); 1428 iminor(inode), ioctl_names[_IOC_NR(cmd)]);
1428 1429
1430 lock_kernel();
1431 rc = -ENODEV;
1429 link = dev_table[iminor(inode)]; 1432 link = dev_table[iminor(inode)];
1430 if (!pcmcia_dev_present(link)) { 1433 if (!pcmcia_dev_present(link)) {
1431 DEBUGP(4, dev, "DEV_OK false\n"); 1434 DEBUGP(4, dev, "DEV_OK false\n");
1432 return -ENODEV; 1435 goto out;
1433 } 1436 }
1434 1437
1435 if (test_bit(IS_CMM_ABSENT, &dev->flags)) { 1438 if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
1436 DEBUGP(4, dev, "CMM_ABSENT flag set\n"); 1439 DEBUGP(4, dev, "CMM_ABSENT flag set\n");
1437 return -ENODEV; 1440 goto out;
1438 } 1441 }
1442 rc = EINVAL;
1439 1443
1440 if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { 1444 if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) {
1441 DEBUGP(4, dev, "ioctype mismatch\n"); 1445 DEBUGP(4, dev, "ioctype mismatch\n");
1442 return -EINVAL; 1446 goto out;
1443 } 1447 }
1444 if (_IOC_NR(cmd) > CM_IOC_MAXNR) { 1448 if (_IOC_NR(cmd) > CM_IOC_MAXNR) {
1445 DEBUGP(4, dev, "iocnr mismatch\n"); 1449 DEBUGP(4, dev, "iocnr mismatch\n");
1446 return -EINVAL; 1450 goto out;
1447 } 1451 }
1448 size = _IOC_SIZE(cmd); 1452 size = _IOC_SIZE(cmd);
1449 rc = 0; 1453 rc = -EFAULT;
1450 DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n", 1454 DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n",
1451 _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd); 1455 _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd);
1452 1456
1453 if (_IOC_DIR(cmd) & _IOC_READ) { 1457 if (_IOC_DIR(cmd) & _IOC_READ) {
1454 if (!access_ok(VERIFY_WRITE, argp, size)) 1458 if (!access_ok(VERIFY_WRITE, argp, size))
1455 return -EFAULT; 1459 goto out;
1456 } 1460 }
1457 if (_IOC_DIR(cmd) & _IOC_WRITE) { 1461 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1458 if (!access_ok(VERIFY_READ, argp, size)) 1462 if (!access_ok(VERIFY_READ, argp, size))
1459 return -EFAULT; 1463 goto out;
1460 } 1464 }
1465 rc = 0;
1461 1466
1462 switch (cmd) { 1467 switch (cmd) {
1463 case CM_IOCGSTATUS: 1468 case CM_IOCGSTATUS:
@@ -1477,9 +1482,9 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1477 if (test_bit(IS_BAD_CARD, &dev->flags)) 1482 if (test_bit(IS_BAD_CARD, &dev->flags))
1478 status |= CM_BAD_CARD; 1483 status |= CM_BAD_CARD;
1479 if (copy_to_user(argp, &status, sizeof(int))) 1484 if (copy_to_user(argp, &status, sizeof(int)))
1480 return -EFAULT; 1485 rc = -EFAULT;
1481 } 1486 }
1482 return 0; 1487 break;
1483 case CM_IOCGATR: 1488 case CM_IOCGATR:
1484 DEBUGP(4, dev, "... in CM_IOCGATR\n"); 1489 DEBUGP(4, dev, "... in CM_IOCGATR\n");
1485 { 1490 {
@@ -1492,25 +1497,29 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1492 || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) 1497 || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
1493 != 0)))) { 1498 != 0)))) {
1494 if (filp->f_flags & O_NONBLOCK) 1499 if (filp->f_flags & O_NONBLOCK)
1495 return -EAGAIN; 1500 rc = -EAGAIN;
1496 return -ERESTARTSYS; 1501 else
1502 rc = -ERESTARTSYS;
1503 break;
1497 } 1504 }
1498 1505
1506 rc = -EFAULT;
1499 if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { 1507 if (test_bit(IS_ATR_VALID, &dev->flags) == 0) {
1500 tmp = -1; 1508 tmp = -1;
1501 if (copy_to_user(&(atreq->atr_len), &tmp, 1509 if (copy_to_user(&(atreq->atr_len), &tmp,
1502 sizeof(int))) 1510 sizeof(int)))
1503 return -EFAULT; 1511 break;
1504 } else { 1512 } else {
1505 if (copy_to_user(atreq->atr, dev->atr, 1513 if (copy_to_user(atreq->atr, dev->atr,
1506 dev->atr_len)) 1514 dev->atr_len))
1507 return -EFAULT; 1515 break;
1508 1516
1509 tmp = dev->atr_len; 1517 tmp = dev->atr_len;
1510 if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int))) 1518 if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int)))
1511 return -EFAULT; 1519 break;
1512 } 1520 }
1513 return 0; 1521 rc = 0;
1522 break;
1514 } 1523 }
1515 case CM_IOCARDOFF: 1524 case CM_IOCARDOFF:
1516 1525
@@ -1538,8 +1547,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1538 || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) 1547 || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
1539 == 0)))) { 1548 == 0)))) {
1540 if (filp->f_flags & O_NONBLOCK) 1549 if (filp->f_flags & O_NONBLOCK)
1541 return -EAGAIN; 1550 rc = -EAGAIN;
1542 return -ERESTARTSYS; 1551 else
1552 rc = -ERESTARTSYS;
1553 break;
1543 } 1554 }
1544 /* Set Flags0 = 0x42 */ 1555 /* Set Flags0 = 0x42 */
1545 DEBUGP(4, dev, "Set Flags0=0x42 \n"); 1556 DEBUGP(4, dev, "Set Flags0=0x42 \n");
@@ -1554,8 +1565,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1554 || (test_bit(IS_ATR_VALID, (void *)&dev->flags) != 1565 || (test_bit(IS_ATR_VALID, (void *)&dev->flags) !=
1555 0)))) { 1566 0)))) {
1556 if (filp->f_flags & O_NONBLOCK) 1567 if (filp->f_flags & O_NONBLOCK)
1557 return -EAGAIN; 1568 rc = -EAGAIN;
1558 return -ERESTARTSYS; 1569 else
1570 rc = -ERESTARTSYS;
1571 break;
1559 } 1572 }
1560 } 1573 }
1561 /* release lock */ 1574 /* release lock */
@@ -1568,8 +1581,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1568 struct ptsreq krnptsreq; 1581 struct ptsreq krnptsreq;
1569 1582
1570 if (copy_from_user(&krnptsreq, argp, 1583 if (copy_from_user(&krnptsreq, argp,
1571 sizeof(struct ptsreq))) 1584 sizeof(struct ptsreq))) {
1572 return -EFAULT; 1585 rc = -EFAULT;
1586 break;
1587 }
1573 1588
1574 rc = 0; 1589 rc = 0;
1575 DEBUGP(4, dev, "... in CM_IOCSPTS\n"); 1590 DEBUGP(4, dev, "... in CM_IOCSPTS\n");
@@ -1580,8 +1595,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1580 || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) 1595 || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
1581 != 0)))) { 1596 != 0)))) {
1582 if (filp->f_flags & O_NONBLOCK) 1597 if (filp->f_flags & O_NONBLOCK)
1583 return -EAGAIN; 1598 rc = -EAGAIN;
1584 return -ERESTARTSYS; 1599 else
1600 rc = -ERESTARTSYS;
1601 break;
1585 } 1602 }
1586 /* get IO lock */ 1603 /* get IO lock */
1587 if (wait_event_interruptible 1604 if (wait_event_interruptible
@@ -1590,8 +1607,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1590 || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) 1607 || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
1591 == 0)))) { 1608 == 0)))) {
1592 if (filp->f_flags & O_NONBLOCK) 1609 if (filp->f_flags & O_NONBLOCK)
1593 return -EAGAIN; 1610 rc = -EAGAIN;
1594 return -ERESTARTSYS; 1611 else
1612 rc = -ERESTARTSYS;
1613 break;
1595 } 1614 }
1596 1615
1597 if ((rc = set_protocol(dev, &krnptsreq)) != 0) { 1616 if ((rc = set_protocol(dev, &krnptsreq)) != 0) {
@@ -1604,7 +1623,7 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1604 wake_up_interruptible(&dev->ioq); 1623 wake_up_interruptible(&dev->ioq);
1605 1624
1606 } 1625 }
1607 return rc; 1626 break;
1608#ifdef PCMCIA_DEBUG 1627#ifdef PCMCIA_DEBUG
1609 case CM_IOSDBGLVL: /* set debug log level */ 1628 case CM_IOSDBGLVL: /* set debug log level */
1610 { 1629 {
@@ -1612,18 +1631,20 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1612 1631
1613 old_pc_debug = pc_debug; 1632 old_pc_debug = pc_debug;
1614 if (copy_from_user(&pc_debug, argp, sizeof(int))) 1633 if (copy_from_user(&pc_debug, argp, sizeof(int)))
1615 return -EFAULT; 1634 rc = -EFAULT;
1616 1635 else if (old_pc_debug != pc_debug)
1617 if (old_pc_debug != pc_debug)
1618 DEBUGP(0, dev, "Changed debug log level " 1636 DEBUGP(0, dev, "Changed debug log level "
1619 "to %i\n", pc_debug); 1637 "to %i\n", pc_debug);
1620 } 1638 }
1621 return rc; 1639 break;
1622#endif 1640#endif
1623 default: 1641 default:
1624 DEBUGP(4, dev, "... in default (unknown IOCTL code)\n"); 1642 DEBUGP(4, dev, "... in default (unknown IOCTL code)\n");
1625 return -EINVAL; 1643 rc = -ENOTTY;
1626 } 1644 }
1645out:
1646 unlock_kernel();
1647 return rc;
1627} 1648}
1628 1649
1629static int cmm_open(struct inode *inode, struct file *filp) 1650static int cmm_open(struct inode *inode, struct file *filp)
@@ -1631,16 +1652,22 @@ static int cmm_open(struct inode *inode, struct file *filp)
1631 struct cm4000_dev *dev; 1652 struct cm4000_dev *dev;
1632 struct pcmcia_device *link; 1653 struct pcmcia_device *link;
1633 int minor = iminor(inode); 1654 int minor = iminor(inode);
1655 int ret;
1634 1656
1635 if (minor >= CM4000_MAX_DEV) 1657 if (minor >= CM4000_MAX_DEV)
1636 return -ENODEV; 1658 return -ENODEV;
1637 1659
1660 lock_kernel();
1638 link = dev_table[minor]; 1661 link = dev_table[minor];
1639 if (link == NULL || !pcmcia_dev_present(link)) 1662 if (link == NULL || !pcmcia_dev_present(link)) {
1640 return -ENODEV; 1663 ret = -ENODEV;
1664 goto out;
1665 }
1641 1666
1642 if (link->open) 1667 if (link->open) {
1643 return -EBUSY; 1668 ret = -EBUSY;
1669 goto out;
1670 }
1644 1671
1645 dev = link->priv; 1672 dev = link->priv;
1646 filp->private_data = dev; 1673 filp->private_data = dev;
@@ -1660,8 +1687,10 @@ static int cmm_open(struct inode *inode, struct file *filp)
1660 * vaild = block until valid (or card 1687 * vaild = block until valid (or card
1661 * inserted) 1688 * inserted)
1662 */ 1689 */
1663 if (filp->f_flags & O_NONBLOCK) 1690 if (filp->f_flags & O_NONBLOCK) {
1664 return -EAGAIN; 1691 ret = -EAGAIN;
1692 goto out;
1693 }
1665 1694
1666 dev->mdelay = T_50MSEC; 1695 dev->mdelay = T_50MSEC;
1667 1696
@@ -1671,7 +1700,10 @@ static int cmm_open(struct inode *inode, struct file *filp)
1671 link->open = 1; /* only one open per device */ 1700 link->open = 1; /* only one open per device */
1672 1701
1673 DEBUGP(2, dev, "<- cmm_open\n"); 1702 DEBUGP(2, dev, "<- cmm_open\n");
1674 return nonseekable_open(inode, filp); 1703 ret = nonseekable_open(inode, filp);
1704out:
1705 unlock_kernel();
1706 return ret;
1675} 1707}
1676 1708
1677static int cmm_close(struct inode *inode, struct file *filp) 1709static int cmm_close(struct inode *inode, struct file *filp)
@@ -1897,7 +1929,7 @@ static const struct file_operations cm4000_fops = {
1897 .owner = THIS_MODULE, 1929 .owner = THIS_MODULE,
1898 .read = cmm_read, 1930 .read = cmm_read,
1899 .write = cmm_write, 1931 .write = cmm_write,
1900 .ioctl = cmm_ioctl, 1932 .unlocked_ioctl = cmm_ioctl,
1901 .open = cmm_open, 1933 .open = cmm_open,
1902 .release= cmm_close, 1934 .release= cmm_close,
1903}; 1935};
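The cm4000 hunk above also converts the driver from .ioctl to .unlocked_ioctl. A condensed sketch of that conversion for a hypothetical driver (the ioctl name, magic number and fops below are illustrative only): the handler loses its inode argument, recovers the inode from the file where it still needs the minor number, and takes the BKL itself because the VFS no longer does.

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/uaccess.h>

#define EXAMPLE_IOC_MAGIC	'E'
#define EXAMPLE_IOCGMINOR	_IOR(EXAMPLE_IOC_MAGIC, 0, unsigned int)

/* unlocked_ioctl prototype: no inode argument, returns long, and the
 * VFS takes no BKL on the driver's behalf. */
static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	long rc = -ENOTTY;

	lock_kernel();
	switch (cmd) {
	case EXAMPLE_IOCGMINOR:
		/* the minor number is still reachable through the inode */
		rc = put_user(iminor(inode), (unsigned int __user *)arg);
		break;
	}
	unlock_kernel();
	return rc;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
};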
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 035084c07329..6181f8a9b0bd 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -26,6 +26,7 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/poll.h> 28#include <linux/poll.h>
29#include <linux/smp_lock.h>
29#include <linux/wait.h> 30#include <linux/wait.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/io.h> 32#include <asm/io.h>
@@ -448,23 +449,30 @@ static int cm4040_open(struct inode *inode, struct file *filp)
448 struct reader_dev *dev; 449 struct reader_dev *dev;
449 struct pcmcia_device *link; 450 struct pcmcia_device *link;
450 int minor = iminor(inode); 451 int minor = iminor(inode);
452 int ret;
451 453
452 if (minor >= CM_MAX_DEV) 454 if (minor >= CM_MAX_DEV)
453 return -ENODEV; 455 return -ENODEV;
454 456
457 lock_kernel();
455 link = dev_table[minor]; 458 link = dev_table[minor];
456 if (link == NULL || !pcmcia_dev_present(link)) 459 if (link == NULL || !pcmcia_dev_present(link)) {
457 return -ENODEV; 460 ret = -ENODEV;
461 goto out;
462 }
458 463
459 if (link->open) 464 if (link->open) {
460 return -EBUSY; 465 ret = -EBUSY;
466 goto out;
467 }
461 468
462 dev = link->priv; 469 dev = link->priv;
463 filp->private_data = dev; 470 filp->private_data = dev;
464 471
465 if (filp->f_flags & O_NONBLOCK) { 472 if (filp->f_flags & O_NONBLOCK) {
466 DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n"); 473 DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
467 return -EAGAIN; 474 ret = -EAGAIN;
475 goto out;
468 } 476 }
469 477
470 link->open = 1; 478 link->open = 1;
@@ -473,7 +481,10 @@ static int cm4040_open(struct inode *inode, struct file *filp)
473 mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD); 481 mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
474 482
475 DEBUGP(2, dev, "<- cm4040_open (successfully)\n"); 483 DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
476 return nonseekable_open(inode, filp); 484 ret = nonseekable_open(inode, filp);
485out:
486 unlock_kernel();
487 return ret;
477} 488}
478 489
479static int cm4040_close(struct inode *inode, struct file *filp) 490static int cm4040_close(struct inode *inode, struct file *filp)
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 00c7f8407e3e..cc7dcea2d283 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -28,7 +28,6 @@
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include <pcmcia/version.h>
32#include <pcmcia/cisreg.h> 31#include <pcmcia/cisreg.h>
33#include <pcmcia/device_id.h> 32#include <pcmcia/device_id.h>
34#include <pcmcia/ss.h> 33#include <pcmcia/ss.h>
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 3aab837d9480..f6e6acadd9a0 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -66,6 +66,7 @@
66#include <linux/poll.h> 66#include <linux/poll.h>
67#include <linux/major.h> 67#include <linux/major.h>
68#include <linux/ppdev.h> 68#include <linux/ppdev.h>
69#include <linux/smp_lock.h>
69#include <asm/uaccess.h> 70#include <asm/uaccess.h>
70 71
71#define PP_VERSION "ppdev: user-space parallel port driver" 72#define PP_VERSION "ppdev: user-space parallel port driver"
@@ -638,6 +639,7 @@ static int pp_open (struct inode * inode, struct file * file)
638 unsigned int minor = iminor(inode); 639 unsigned int minor = iminor(inode);
639 struct pp_struct *pp; 640 struct pp_struct *pp;
640 641
642 cycle_kernel_lock();
641 if (minor >= PARPORT_MAX) 643 if (minor >= PARPORT_MAX)
642 return -ENXIO; 644 return -ENXIO;
643 645
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index bbfa0e241cba..505fcbe884a4 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -19,6 +19,7 @@
19#include <linux/cdev.h> 19#include <linux/cdev.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/smp_lock.h>
22 23
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24 25
@@ -53,6 +54,7 @@ static int raw_open(struct inode *inode, struct file *filp)
53 return 0; 54 return 0;
54 } 55 }
55 56
57 lock_kernel();
56 mutex_lock(&raw_mutex); 58 mutex_lock(&raw_mutex);
57 59
58 /* 60 /*
@@ -79,6 +81,7 @@ static int raw_open(struct inode *inode, struct file *filp)
79 bdev->bd_inode->i_mapping; 81 bdev->bd_inode->i_mapping;
80 filp->private_data = bdev; 82 filp->private_data = bdev;
81 mutex_unlock(&raw_mutex); 83 mutex_unlock(&raw_mutex);
84 unlock_kernel();
82 return 0; 85 return 0;
83 86
84out2: 87out2:
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 909cac93fa2a..fa92a8af5a5a 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -73,6 +73,7 @@
73#include <linux/proc_fs.h> 73#include <linux/proc_fs.h>
74#include <linux/seq_file.h> 74#include <linux/seq_file.h>
75#include <linux/spinlock.h> 75#include <linux/spinlock.h>
76#include <linux/smp_lock.h>
76#include <linux/sysctl.h> 77#include <linux/sysctl.h>
77#include <linux/wait.h> 78#include <linux/wait.h>
78#include <linux/bcd.h> 79#include <linux/bcd.h>
@@ -734,6 +735,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
734 * needed here. Or anywhere else in this driver. */ 735 * needed here. Or anywhere else in this driver. */
735static int rtc_open(struct inode *inode, struct file *file) 736static int rtc_open(struct inode *inode, struct file *file)
736{ 737{
738 lock_kernel();
737 spin_lock_irq(&rtc_lock); 739 spin_lock_irq(&rtc_lock);
738 740
739 if (rtc_status & RTC_IS_OPEN) 741 if (rtc_status & RTC_IS_OPEN)
@@ -743,10 +745,12 @@ static int rtc_open(struct inode *inode, struct file *file)
743 745
744 rtc_irq_data = 0; 746 rtc_irq_data = 0;
745 spin_unlock_irq(&rtc_lock); 747 spin_unlock_irq(&rtc_lock);
748 unlock_kernel();
746 return 0; 749 return 0;
747 750
748out_busy: 751out_busy:
749 spin_unlock_irq(&rtc_lock); 752 spin_unlock_irq(&rtc_lock);
753 unlock_kernel();
750 return -EBUSY; 754 return -EBUSY;
751} 755}
752 756
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 99e5272e3c53..1d9100561c8a 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -12,6 +12,7 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/smp_lock.h>
15#include <asm/uaccess.h> 16#include <asm/uaccess.h>
16#include <asm/io.h> 17#include <asm/io.h>
17 18
@@ -51,6 +52,7 @@ static int scx200_gpio_open(struct inode *inode, struct file *file)
51 unsigned m = iminor(inode); 52 unsigned m = iminor(inode);
52 file->private_data = &scx200_gpio_ops; 53 file->private_data = &scx200_gpio_ops;
53 54
55 cycle_kernel_lock();
54 if (m >= MAX_PINS) 56 if (m >= MAX_PINS)
55 return -EINVAL; 57 return -EINVAL;
56 return nonseekable_open(inode, file); 58 return nonseekable_open(inode, file);
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 8fe099a41065..0b799ac1b049 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -21,6 +21,7 @@
21#include <linux/poll.h> 21#include <linux/poll.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/smp_lock.h>
24#include <asm/sn/io.h> 25#include <asm/sn/io.h>
25#include <asm/sn/sn_sal.h> 26#include <asm/sn/sn_sal.h>
26#include <asm/sn/module.h> 27#include <asm/sn/module.h>
@@ -104,6 +105,7 @@ scdrv_open(struct inode *inode, struct file *file)
104 file->private_data = sd; 105 file->private_data = sd;
105 106
106 /* hook this subchannel up to the system controller interrupt */ 107 /* hook this subchannel up to the system controller interrupt */
108 lock_kernel();
107 rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt, 109 rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt,
108 IRQF_SHARED | IRQF_DISABLED, 110 IRQF_SHARED | IRQF_DISABLED,
109 SYSCTL_BASENAME, sd); 111 SYSCTL_BASENAME, sd);
@@ -111,9 +113,10 @@ scdrv_open(struct inode *inode, struct file *file)
111 ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch); 113 ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
112 kfree(sd); 114 kfree(sd);
113 printk("%s: irq request failed (%d)\n", __func__, rv); 115 printk("%s: irq request failed (%d)\n", __func__, rv);
116 unlock_kernel();
114 return -EBUSY; 117 return -EBUSY;
115 } 118 }
116 119 unlock_kernel();
117 return 0; 120 return 0;
118} 121}
119 122
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 58533de59027..85e0eb76eeab 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -49,6 +49,7 @@
49#include <linux/err.h> 49#include <linux/err.h>
50#include <linux/kfifo.h> 50#include <linux/kfifo.h>
51#include <linux/platform_device.h> 51#include <linux/platform_device.h>
52#include <linux/smp_lock.h>
52 53
53#include <asm/uaccess.h> 54#include <asm/uaccess.h>
54#include <asm/io.h> 55#include <asm/io.h>
@@ -906,12 +907,14 @@ static int sonypi_misc_release(struct inode *inode, struct file *file)
906 907
907static int sonypi_misc_open(struct inode *inode, struct file *file) 908static int sonypi_misc_open(struct inode *inode, struct file *file)
908{ 909{
910 lock_kernel();
909 mutex_lock(&sonypi_device.lock); 911 mutex_lock(&sonypi_device.lock);
910 /* Flush input queue on first open */ 912 /* Flush input queue on first open */
911 if (!sonypi_device.open_count) 913 if (!sonypi_device.open_count)
912 kfifo_reset(sonypi_device.fifo); 914 kfifo_reset(sonypi_device.fifo);
913 sonypi_device.open_count++; 915 sonypi_device.open_count++;
914 mutex_unlock(&sonypi_device.lock); 916 mutex_unlock(&sonypi_device.lock);
917 unlock_kernel();
915 return 0; 918 return 0;
916} 919}
917 920
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 4c431cb7cf1b..6062b62800fd 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -21,6 +21,7 @@
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/smp_lock.h>
24 25
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/reboot.h> 27#include <asm/reboot.h>
@@ -236,6 +237,7 @@ static int tanbac_tb0219_open(struct inode *inode, struct file *file)
236{ 237{
237 unsigned int minor; 238 unsigned int minor;
238 239
240 cycle_kernel_lock();
239 minor = iminor(inode); 241 minor = iminor(inode);
240 switch (minor) { 242 switch (minor) {
241 case 0: 243 case 0:
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 35e58030d296..8f2284be68e1 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -36,6 +36,7 @@
36#include <linux/ioport.h> 36#include <linux/ioport.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/smp_lock.h>
39#include <linux/timer.h> 40#include <linux/timer.h>
40#include <linux/sysfs.h> 41#include <linux/sysfs.h>
41#include <linux/device.h> 42#include <linux/device.h>
@@ -204,11 +205,14 @@ static int tlclk_open(struct inode *inode, struct file *filp)
204{ 205{
205 int result; 206 int result;
206 207
207 if (test_and_set_bit(0, &useflags)) 208 lock_kernel();
208 return -EBUSY; 209 if (test_and_set_bit(0, &useflags)) {
210 result = -EBUSY;
209 /* this legacy device is always one per system and it doesn't 211 /* this legacy device is always one per system and it doesn't
210 * know how to handle multiple concurrent clients. 212 * know how to handle multiple concurrent clients.
211 */ 213 */
214 goto out;
215 }
212 216
213 /* Make sure there is no interrupt pending while 217 /* Make sure there is no interrupt pending while
214 * initialising interrupt handler */ 218 * initialising interrupt handler */
@@ -218,13 +222,14 @@ static int tlclk_open(struct inode *inode, struct file *filp)
218 * we can't share this IRQ */ 222 * we can't share this IRQ */
219 result = request_irq(telclk_interrupt, &tlclk_interrupt, 223 result = request_irq(telclk_interrupt, &tlclk_interrupt,
220 IRQF_DISABLED, "telco_clock", tlclk_interrupt); 224 IRQF_DISABLED, "telco_clock", tlclk_interrupt);
221 if (result == -EBUSY) { 225 if (result == -EBUSY)
222 printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n"); 226 printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n");
223 return -EBUSY; 227 else
224 } 228 inb(TLCLK_REG6); /* Clear interrupt events */
225 inb(TLCLK_REG6); /* Clear interrupt events */
226 229
227 return 0; 230out:
231 unlock_kernel();
232 return result;
228} 233}
229 234
230static int tlclk_release(struct inode *inode, struct file *filp) 235static int tlclk_release(struct inode *inode, struct file *filp)
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index a5d8bcb40000..e1fc193d9396 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -26,6 +26,7 @@
26#include <linux/poll.h> 26#include <linux/poll.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/smp_lock.h>
29 30
30#include "tpm.h" 31#include "tpm.h"
31 32
@@ -897,6 +898,7 @@ int tpm_open(struct inode *inode, struct file *file)
897 int rc = 0, minor = iminor(inode); 898 int rc = 0, minor = iminor(inode);
898 struct tpm_chip *chip = NULL, *pos; 899 struct tpm_chip *chip = NULL, *pos;
899 900
901 lock_kernel();
900 spin_lock(&driver_lock); 902 spin_lock(&driver_lock);
901 903
902 list_for_each_entry(pos, &tpm_chip_list, list) { 904 list_for_each_entry(pos, &tpm_chip_list, list) {
@@ -926,16 +928,19 @@ int tpm_open(struct inode *inode, struct file *file)
926 if (chip->data_buffer == NULL) { 928 if (chip->data_buffer == NULL) {
927 chip->num_opens--; 929 chip->num_opens--;
928 put_device(chip->dev); 930 put_device(chip->dev);
931 unlock_kernel();
929 return -ENOMEM; 932 return -ENOMEM;
930 } 933 }
931 934
932 atomic_set(&chip->data_pending, 0); 935 atomic_set(&chip->data_pending, 0);
933 936
934 file->private_data = chip; 937 file->private_data = chip;
938 unlock_kernel();
935 return 0; 939 return 0;
936 940
937err_out: 941err_out:
938 spin_unlock(&driver_lock); 942 spin_unlock(&driver_lock);
943 unlock_kernel();
939 return rc; 944 return rc;
940} 945}
941EXPORT_SYMBOL_GPL(tpm_open); 946EXPORT_SYMBOL_GPL(tpm_open);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 750131010af0..047a17339f83 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -2665,7 +2665,7 @@ static void release_dev(struct file *filp)
2665 * ->siglock protects ->signal/->sighand 2665 * ->siglock protects ->signal/->sighand
2666 */ 2666 */
2667 2667
2668static int tty_open(struct inode *inode, struct file *filp) 2668static int __tty_open(struct inode *inode, struct file *filp)
2669{ 2669{
2670 struct tty_struct *tty; 2670 struct tty_struct *tty;
2671 int noctty, retval; 2671 int noctty, retval;
@@ -2779,6 +2779,19 @@ got_driver:
2779 return 0; 2779 return 0;
2780} 2780}
2781 2781
2782/* BKL pushdown: scary code avoidance wrapper */
2783static int tty_open(struct inode *inode, struct file *filp)
2784{
2785 int ret;
2786
2787 lock_kernel();
2788 ret = __tty_open(inode, filp);
2789 unlock_kernel();
2790 return ret;
2791}
2792
2793
2794
2782#ifdef CONFIG_UNIX98_PTYS 2795#ifdef CONFIG_UNIX98_PTYS
2783/** 2796/**
2784 * ptmx_open - open a unix 98 pty master 2797 * ptmx_open - open a unix 98 pty master
@@ -2792,7 +2805,7 @@ got_driver:
2792 * allocated_ptys_lock handles the list of free pty numbers 2805 * allocated_ptys_lock handles the list of free pty numbers
2793 */ 2806 */
2794 2807
2795static int ptmx_open(struct inode *inode, struct file *filp) 2808static int __ptmx_open(struct inode *inode, struct file *filp)
2796{ 2809{
2797 struct tty_struct *tty; 2810 struct tty_struct *tty;
2798 int retval; 2811 int retval;
@@ -2831,6 +2844,16 @@ out:
2831 devpts_kill_index(index); 2844 devpts_kill_index(index);
2832 return retval; 2845 return retval;
2833} 2846}
2847
2848static int ptmx_open(struct inode *inode, struct file *filp)
2849{
2850 int ret;
2851
2852 lock_kernel();
2853 ret = __ptmx_open(inode, filp);
2854 unlock_kernel();
2855 return ret;
2856}
2834#endif 2857#endif
2835 2858
2836/** 2859/**
@@ -2886,15 +2909,16 @@ static int tty_fasync(int fd, struct file *filp, int on)
2886{ 2909{
2887 struct tty_struct *tty; 2910 struct tty_struct *tty;
2888 unsigned long flags; 2911 unsigned long flags;
2889 int retval; 2912 int retval = 0;
2890 2913
2914 lock_kernel();
2891 tty = (struct tty_struct *)filp->private_data; 2915 tty = (struct tty_struct *)filp->private_data;
2892 if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync")) 2916 if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
2893 return 0; 2917 goto out;
2894 2918
2895 retval = fasync_helper(fd, filp, on, &tty->fasync); 2919 retval = fasync_helper(fd, filp, on, &tty->fasync);
2896 if (retval <= 0) 2920 if (retval <= 0)
2897 return retval; 2921 goto out;
2898 2922
2899 if (on) { 2923 if (on) {
2900 enum pid_type type; 2924 enum pid_type type;
@@ -2912,12 +2936,15 @@ static int tty_fasync(int fd, struct file *filp, int on)
2912 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 2936 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
2913 retval = __f_setown(filp, pid, type, 0); 2937 retval = __f_setown(filp, pid, type, 0);
2914 if (retval) 2938 if (retval)
2915 return retval; 2939 goto out;
2916 } else { 2940 } else {
2917 if (!tty->fasync && !waitqueue_active(&tty->read_wait)) 2941 if (!tty->fasync && !waitqueue_active(&tty->read_wait))
2918 tty->minimum_to_wake = N_TTY_BUF_SIZE; 2942 tty->minimum_to_wake = N_TTY_BUF_SIZE;
2919 } 2943 }
2920 return 0; 2944 retval = 0;
2945out:
2946 unlock_kernel();
2947 return retval;
2921} 2948}
2922 2949
2923/** 2950/**
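
The tty_fasync change shows the other recurring transformation: once lock_kernel() is taken at the top of a handler, every early return has to become a jump to a single exit label so unlock_kernel() runs on all paths (tlclk_open above and hwicap_open further down get the same treatment). A rough sketch of that shape for a hypothetical handler, where foo_fasync and foo_fasync_queue are illustrative names only:

	static struct fasync_struct *foo_fasync_queue;

	static int foo_fasync(int fd, struct file *filp, int on)
	{
		int retval = 0;

		lock_kernel();
		if (!filp->private_data)	/* was: return 0; */
			goto out;

		retval = fasync_helper(fd, filp, on, &foo_fasync_queue);
		if (retval <= 0)		/* was: return retval; */
			goto out;

		retval = 0;
	out:
		unlock_kernel();
		return retval;
	}
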
diff --git a/drivers/char/vc_screen.c b/drivers/char/vc_screen.c
index 83aeedda200c..eebfad2777d2 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/char/vc_screen.c
@@ -34,6 +34,7 @@
34#include <linux/kbd_kern.h> 34#include <linux/kbd_kern.h>
35#include <linux/console.h> 35#include <linux/console.h>
36#include <linux/device.h> 36#include <linux/device.h>
37#include <linux/smp_lock.h>
37 38
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39#include <asm/byteorder.h> 40#include <asm/byteorder.h>
@@ -460,9 +461,13 @@ static int
460vcs_open(struct inode *inode, struct file *filp) 461vcs_open(struct inode *inode, struct file *filp)
461{ 462{
462 unsigned int currcons = iminor(inode) & 127; 463 unsigned int currcons = iminor(inode) & 127;
464 int ret = 0;
465
466 lock_kernel();
463 if(currcons && !vc_cons_allocated(currcons-1)) 467 if(currcons && !vc_cons_allocated(currcons-1))
464 return -ENXIO; 468 ret = -ENXIO;
465 return 0; 469 unlock_kernel();
470 return ret;
466} 471}
467 472
468static const struct file_operations vcs_fops = { 473static const struct file_operations vcs_fops = {
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index d4db42ca71e6..e5da98d8f9cd 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -699,6 +699,7 @@ static int viotap_open(struct inode *inode, struct file *file)
699 if (op == NULL) 699 if (op == NULL)
700 return -ENOMEM; 700 return -ENOMEM;
701 701
702 lock_kernel();
702 get_dev_info(file->f_path.dentry->d_inode, &devi); 703 get_dev_info(file->f_path.dentry->d_inode, &devi);
703 704
704 /* Note: We currently only support one mode! */ 705 /* Note: We currently only support one mode! */
@@ -729,6 +730,7 @@ static int viotap_open(struct inode *inode, struct file *file)
729 730
730free_op: 731free_op:
731 free_op_struct(op); 732 free_op_struct(op);
733 unlock_kernel();
732 return ret; 734 return ret;
733} 735}
734 736
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
index e5ed09192be8..ffe9b4e3072e 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/char/vr41xx_giu.c
@@ -27,6 +27,7 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/smp_lock.h>
30#include <linux/spinlock.h> 31#include <linux/spinlock.h>
31#include <linux/types.h> 32#include <linux/types.h>
32 33
@@ -547,6 +548,7 @@ static int gpio_open(struct inode *inode, struct file *file)
547{ 548{
548 unsigned int pin; 549 unsigned int pin;
549 550
551 cycle_kernel_lock();
550 pin = iminor(inode); 552 pin = iminor(inode);
551 if (pin >= giu_nr_pins) 553 if (pin >= giu_nr_pins)
552 return -EBADF; 554 return -EBADF;
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 3edf1fc12963..1e1b81e57cdc 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -85,6 +85,7 @@
85#include <linux/poll.h> 85#include <linux/poll.h>
86#include <linux/proc_fs.h> 86#include <linux/proc_fs.h>
87#include <linux/mutex.h> 87#include <linux/mutex.h>
88#include <linux/smp_lock.h>
88#include <linux/sysctl.h> 89#include <linux/sysctl.h>
89#include <linux/version.h> 90#include <linux/version.h>
90#include <linux/fs.h> 91#include <linux/fs.h>
@@ -504,11 +505,12 @@ static int hwicap_open(struct inode *inode, struct file *file)
504 struct hwicap_drvdata *drvdata; 505 struct hwicap_drvdata *drvdata;
505 int status; 506 int status;
506 507
508 lock_kernel();
507 drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); 509 drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
508 510
509 status = mutex_lock_interruptible(&drvdata->sem); 511 status = mutex_lock_interruptible(&drvdata->sem);
510 if (status) 512 if (status)
511 return status; 513 goto out;
512 514
513 if (drvdata->is_open) { 515 if (drvdata->is_open) {
514 status = -EBUSY; 516 status = -EBUSY;
@@ -528,6 +530,8 @@ static int hwicap_open(struct inode *inode, struct file *file)
528 530
529 error: 531 error:
530 mutex_unlock(&drvdata->sem); 532 mutex_unlock(&drvdata->sem);
533 out:
534 unlock_kernel();
531 return status; 535 return status;
532} 536}
533 537
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43b71b69daa5..e522144cba3a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -174,4 +174,30 @@ config CRYPTO_DEV_HIFN_795X_RNG
174 Select this option if you want to enable the random number generator 174 Select this option if you want to enable the random number generator
175 on the HIFN 795x crypto adapters. 175 on the HIFN 795x crypto adapters.
176 176
177config CRYPTO_DEV_TALITOS
178 tristate "Talitos Freescale Security Engine (SEC)"
179 select CRYPTO_ALGAPI
180 select CRYPTO_AUTHENC
181 select HW_RANDOM
182 depends on FSL_SOC
183 help
184 Say 'Y' here to use the Freescale Security Engine (SEC)
185 to offload cryptographic algorithm computation.
186
187 The Freescale SEC is present on PowerQUICC 'E' processors, such
188 as the MPC8349E and MPC8548E.
189
190 To compile this driver as a module, choose M here: the module
191 will be called talitos.
192
193config CRYPTO_DEV_IXP4XX
194 tristate "Driver for IXP4xx crypto hardware acceleration"
195 depends on ARCH_IXP4XX
196 select CRYPTO_DES
197 select CRYPTO_ALGAPI
198 select CRYPTO_AUTHENC
199 select CRYPTO_BLKCIPHER
200 help
201 Driver for the IXP4xx NPE crypto engine.
202
177endif # CRYPTO_HW 203endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c0327f0dadc5..73557b2968d3 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,3 +2,5 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o 2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
5obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
6obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
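
With the Kconfig entries and Makefile hooks above, talitos and ixp4xx_crypto register their algorithms with the generic crypto API, so in-kernel users pick them up by priority without any changes of their own. A rough sketch of how a caller of this era reaches such an engine, assuming the usual ablkcipher interface: the tfm comes from crypto_alloc_ablkcipher("cbc(aes)", 0, 0) plus crypto_ablkcipher_setkey() at init time, and the example_cbc_aes_encrypt name, callback and buffers below are placeholders, with cleanup left to the completion path as a real caller would do it:

	/* Build and submit one CBC-AES request against whichever provider
	 * won on priority; hardware engines normally return -EINPROGRESS
	 * and finish through the callback. */
	static int example_cbc_aes_encrypt(struct crypto_ablkcipher *tfm,
					   struct scatterlist *src,
					   struct scatterlist *dst,
					   unsigned int nbytes, u8 *iv,
					   crypto_completion_t done, void *done_ctx)
	{
		struct ablkcipher_request *req;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						done, done_ctx);
		ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

		return crypto_ablkcipher_encrypt(req);
	}
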
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 81f3f950cd7d..4d22b21bd3e3 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -29,7 +29,6 @@
29#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/scatterlist.h> 30#include <linux/scatterlist.h>
31#include <linux/highmem.h> 31#include <linux/highmem.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h> 32#include <linux/crypto.h>
34#include <linux/hw_random.h> 33#include <linux/hw_random.h>
35#include <linux/ktime.h> 34#include <linux/ktime.h>
@@ -369,7 +368,9 @@ static atomic_t hifn_dev_number;
369#define HIFN_D_DST_RSIZE 80*4 368#define HIFN_D_DST_RSIZE 80*4
370#define HIFN_D_RES_RSIZE 24*4 369#define HIFN_D_RES_RSIZE 24*4
371 370
372#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-5 371#define HIFN_D_DST_DALIGN 4
372
373#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-1
373 374
374#define AES_MIN_KEY_SIZE 16 375#define AES_MIN_KEY_SIZE 16
375#define AES_MAX_KEY_SIZE 32 376#define AES_MAX_KEY_SIZE 32
@@ -535,10 +536,10 @@ struct hifn_crypt_command
535 */ 536 */
536struct hifn_mac_command 537struct hifn_mac_command
537{ 538{
538 volatile u16 masks; 539 volatile __le16 masks;
539 volatile u16 header_skip; 540 volatile __le16 header_skip;
540 volatile u16 source_count; 541 volatile __le16 source_count;
541 volatile u16 reserved; 542 volatile __le16 reserved;
542}; 543};
543 544
544#define HIFN_MAC_CMD_ALG_MASK 0x0001 545#define HIFN_MAC_CMD_ALG_MASK 0x0001
@@ -564,10 +565,10 @@ struct hifn_mac_command
564 565
565struct hifn_comp_command 566struct hifn_comp_command
566{ 567{
567 volatile u16 masks; 568 volatile __le16 masks;
568 volatile u16 header_skip; 569 volatile __le16 header_skip;
569 volatile u16 source_count; 570 volatile __le16 source_count;
570 volatile u16 reserved; 571 volatile __le16 reserved;
571}; 572};
572 573
573#define HIFN_COMP_CMD_SRCLEN_M 0xc000 574#define HIFN_COMP_CMD_SRCLEN_M 0xc000
@@ -583,10 +584,10 @@ struct hifn_comp_command
583 584
584struct hifn_base_result 585struct hifn_base_result
585{ 586{
586 volatile u16 flags; 587 volatile __le16 flags;
587 volatile u16 session; 588 volatile __le16 session;
588 volatile u16 src_cnt; /* 15:0 of source count */ 589 volatile __le16 src_cnt; /* 15:0 of source count */
589 volatile u16 dst_cnt; /* 15:0 of dest count */ 590 volatile __le16 dst_cnt; /* 15:0 of dest count */
590}; 591};
591 592
592#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ 593#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
@@ -597,8 +598,8 @@ struct hifn_base_result
597 598
598struct hifn_comp_result 599struct hifn_comp_result
599{ 600{
600 volatile u16 flags; 601 volatile __le16 flags;
601 volatile u16 crc; 602 volatile __le16 crc;
602}; 603};
603 604
604#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */ 605#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
@@ -609,8 +610,8 @@ struct hifn_comp_result
609 610
610struct hifn_mac_result 611struct hifn_mac_result
611{ 612{
612 volatile u16 flags; 613 volatile __le16 flags;
613 volatile u16 reserved; 614 volatile __le16 reserved;
614 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ 615 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
615}; 616};
616 617
@@ -619,8 +620,8 @@ struct hifn_mac_result
619 620
620struct hifn_crypt_result 621struct hifn_crypt_result
621{ 622{
622 volatile u16 flags; 623 volatile __le16 flags;
623 volatile u16 reserved; 624 volatile __le16 reserved;
624}; 625};
625 626
626#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */ 627#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
@@ -686,12 +687,12 @@ static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
686 687
687static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) 688static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
688{ 689{
689 writel(val, dev->bar[0] + reg); 690 writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
690} 691}
691 692
692static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val) 693static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
693{ 694{
694 writel(val, dev->bar[1] + reg); 695 writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
695} 696}
696 697
697static void hifn_wait_puc(struct hifn_device *dev) 698static void hifn_wait_puc(struct hifn_device *dev)
@@ -894,7 +895,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
894 char *offtbl = NULL; 895 char *offtbl = NULL;
895 int i; 896 int i;
896 897
897 for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { 898 for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
898 if (pci2id[i].pci_vendor == dev->pdev->vendor && 899 if (pci2id[i].pci_vendor == dev->pdev->vendor &&
899 pci2id[i].pci_prod == dev->pdev->device) { 900 pci2id[i].pci_prod == dev->pdev->device) {
900 offtbl = pci2id[i].card_id; 901 offtbl = pci2id[i].card_id;
@@ -1037,14 +1038,14 @@ static void hifn_init_registers(struct hifn_device *dev)
1037 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); 1038 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1038 1039
1039 /* write all 4 ring address registers */ 1040 /* write all 4 ring address registers */
1040 hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr + 1041 hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
1041 offsetof(struct hifn_dma, cmdr[0]))); 1042 offsetof(struct hifn_dma, cmdr[0]));
1042 hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr + 1043 hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
1043 offsetof(struct hifn_dma, srcr[0]))); 1044 offsetof(struct hifn_dma, srcr[0]));
1044 hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr + 1045 hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
1045 offsetof(struct hifn_dma, dstr[0]))); 1046 offsetof(struct hifn_dma, dstr[0]));
1046 hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr + 1047 hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
1047 offsetof(struct hifn_dma, resr[0]))); 1048 offsetof(struct hifn_dma, resr[0]));
1048 1049
1049 mdelay(2); 1050 mdelay(2);
1050#if 0 1051#if 0
@@ -1166,109 +1167,15 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
1166 return cmd_len; 1167 return cmd_len;
1167} 1168}
1168 1169
1169static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, 1170static int hifn_setup_cmd_desc(struct hifn_device *dev,
1170 unsigned int offset, unsigned int size) 1171 struct hifn_context *ctx, void *priv, unsigned int nbytes)
1171{
1172 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1173 int idx;
1174 dma_addr_t addr;
1175
1176 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
1177
1178 idx = dma->srci;
1179
1180 dma->srcr[idx].p = __cpu_to_le32(addr);
1181 dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
1182 HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
1183
1184 if (++idx == HIFN_D_SRC_RSIZE) {
1185 dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1186 HIFN_D_JUMP |
1187 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1188 idx = 0;
1189 }
1190
1191 dma->srci = idx;
1192 dma->srcu++;
1193
1194 if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
1195 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1196 dev->flags |= HIFN_FLAG_SRC_BUSY;
1197 }
1198
1199 return size;
1200}
1201
1202static void hifn_setup_res_desc(struct hifn_device *dev)
1203{
1204 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1205
1206 dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
1207 HIFN_D_VALID | HIFN_D_LAST);
1208 /*
1209 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
1210 * HIFN_D_LAST | HIFN_D_NOINVALID);
1211 */
1212
1213 if (++dma->resi == HIFN_D_RES_RSIZE) {
1214 dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
1215 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1216 dma->resi = 0;
1217 }
1218
1219 dma->resu++;
1220
1221 if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
1222 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1223 dev->flags |= HIFN_FLAG_RES_BUSY;
1224 }
1225}
1226
1227static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
1228 unsigned offset, unsigned size)
1229{
1230 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1231 int idx;
1232 dma_addr_t addr;
1233
1234 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
1235
1236 idx = dma->dsti;
1237 dma->dstr[idx].p = __cpu_to_le32(addr);
1238 dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1239 HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
1240
1241 if (++idx == HIFN_D_DST_RSIZE) {
1242 dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1243 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1244 HIFN_D_LAST | HIFN_D_NOINVALID);
1245 idx = 0;
1246 }
1247 dma->dsti = idx;
1248 dma->dstu++;
1249
1250 if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
1251 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1252 dev->flags |= HIFN_FLAG_DST_BUSY;
1253 }
1254}
1255
1256static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
1257 struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
1258 struct hifn_context *ctx)
1259{ 1172{
1260 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; 1173 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1261 int cmd_len, sa_idx; 1174 int cmd_len, sa_idx;
1262 u8 *buf, *buf_pos; 1175 u8 *buf, *buf_pos;
1263 u16 mask; 1176 u16 mask;
1264 1177
1265 dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n", 1178 sa_idx = dma->cmdi;
1266 dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
1267
1268 sa_idx = dma->resi;
1269
1270 hifn_setup_src_desc(dev, spage, soff, nbytes);
1271
1272 buf_pos = buf = dma->command_bufs[dma->cmdi]; 1179 buf_pos = buf = dma->command_bufs[dma->cmdi];
1273 1180
1274 mask = 0; 1181 mask = 0;
@@ -1370,16 +1277,113 @@ static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned
1370 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); 1277 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1371 dev->flags |= HIFN_FLAG_CMD_BUSY; 1278 dev->flags |= HIFN_FLAG_CMD_BUSY;
1372 } 1279 }
1373
1374 hifn_setup_dst_desc(dev, dpage, doff, nbytes);
1375 hifn_setup_res_desc(dev);
1376
1377 return 0; 1280 return 0;
1378 1281
1379err_out: 1282err_out:
1380 return -EINVAL; 1283 return -EINVAL;
1381} 1284}
1382 1285
1286static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
1287 unsigned int offset, unsigned int size)
1288{
1289 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1290 int idx;
1291 dma_addr_t addr;
1292
1293 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
1294
1295 idx = dma->srci;
1296
1297 dma->srcr[idx].p = __cpu_to_le32(addr);
1298 dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1299 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1300
1301 if (++idx == HIFN_D_SRC_RSIZE) {
1302 dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1303 HIFN_D_JUMP |
1304 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1305 idx = 0;
1306 }
1307
1308 dma->srci = idx;
1309 dma->srcu++;
1310
1311 if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
1312 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1313 dev->flags |= HIFN_FLAG_SRC_BUSY;
1314 }
1315
1316 return size;
1317}
1318
1319static void hifn_setup_res_desc(struct hifn_device *dev)
1320{
1321 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1322
1323 dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
1324 HIFN_D_VALID | HIFN_D_LAST);
1325 /*
1326 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
1327 * HIFN_D_LAST);
1328 */
1329
1330 if (++dma->resi == HIFN_D_RES_RSIZE) {
1331 dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
1332 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1333 dma->resi = 0;
1334 }
1335
1336 dma->resu++;
1337
1338 if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
1339 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1340 dev->flags |= HIFN_FLAG_RES_BUSY;
1341 }
1342}
1343
1344static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
1345 unsigned offset, unsigned size)
1346{
1347 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1348 int idx;
1349 dma_addr_t addr;
1350
1351 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
1352
1353 idx = dma->dsti;
1354 dma->dstr[idx].p = __cpu_to_le32(addr);
1355 dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1356 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1357
1358 if (++idx == HIFN_D_DST_RSIZE) {
1359 dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1360 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1361 HIFN_D_LAST);
1362 idx = 0;
1363 }
1364 dma->dsti = idx;
1365 dma->dstu++;
1366
1367 if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
1368 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1369 dev->flags |= HIFN_FLAG_DST_BUSY;
1370 }
1371}
1372
1373static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
1374 struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
1375 struct hifn_context *ctx)
1376{
1377 dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
1378 dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
1379
1380 hifn_setup_src_desc(dev, spage, soff, nbytes);
1381 hifn_setup_cmd_desc(dev, ctx, priv, nbytes);
1382 hifn_setup_dst_desc(dev, dpage, doff, nbytes);
1383 hifn_setup_res_desc(dev);
1384 return 0;
1385}
1386
1383static int ablkcipher_walk_init(struct ablkcipher_walk *w, 1387static int ablkcipher_walk_init(struct ablkcipher_walk *w,
1384 int num, gfp_t gfp_flags) 1388 int num, gfp_t gfp_flags)
1385{ 1389{
@@ -1431,7 +1435,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
1431 return -EINVAL; 1435 return -EINVAL;
1432 1436
1433 while (size) { 1437 while (size) {
1434 copy = min(drest, src->length); 1438 copy = min(drest, min(size, src->length));
1435 1439
1436 saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1); 1440 saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
1437 memcpy(daddr, saddr + src->offset, copy); 1441 memcpy(daddr, saddr + src->offset, copy);
@@ -1458,10 +1462,6 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
1458static int ablkcipher_walk(struct ablkcipher_request *req, 1462static int ablkcipher_walk(struct ablkcipher_request *req,
1459 struct ablkcipher_walk *w) 1463 struct ablkcipher_walk *w)
1460{ 1464{
1461 unsigned blocksize =
1462 crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
1463 unsigned alignmask =
1464 crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
1465 struct scatterlist *src, *dst, *t; 1465 struct scatterlist *src, *dst, *t;
1466 void *daddr; 1466 void *daddr;
1467 unsigned int nbytes = req->nbytes, offset, copy, diff; 1467 unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1477,16 +1477,14 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
1477 dst = &req->dst[idx]; 1477 dst = &req->dst[idx];
1478 1478
1479 dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, " 1479 dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
1480 "blocksize: %u, nbytes: %u.\n", 1480 "nbytes: %u.\n",
1481 __func__, src->length, dst->length, src->offset, 1481 __func__, src->length, dst->length, src->offset,
1482 dst->offset, offset, blocksize, nbytes); 1482 dst->offset, offset, nbytes);
1483 1483
1484 if (src->length & (blocksize - 1) || 1484 if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
1485 src->offset & (alignmask - 1) || 1485 !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
1486 dst->length & (blocksize - 1) || 1486 offset) {
1487 dst->offset & (alignmask - 1) || 1487 unsigned slen = min(src->length - offset, nbytes);
1488 offset) {
1489 unsigned slen = src->length - offset;
1490 unsigned dlen = PAGE_SIZE; 1488 unsigned dlen = PAGE_SIZE;
1491 1489
1492 t = &w->cache[idx]; 1490 t = &w->cache[idx];
@@ -1498,8 +1496,8 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
1498 1496
1499 idx += err; 1497 idx += err;
1500 1498
1501 copy = slen & ~(blocksize - 1); 1499 copy = slen & ~(HIFN_D_DST_DALIGN - 1);
1502 diff = slen & (blocksize - 1); 1500 diff = slen & (HIFN_D_DST_DALIGN - 1);
1503 1501
1504 if (dlen < nbytes) { 1502 if (dlen < nbytes) {
1505 /* 1503 /*
@@ -1507,7 +1505,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
1507 * to put there additional blocksized chunk, 1505 * to put there additional blocksized chunk,
1508 * so we mark that page as containing only 1506 * so we mark that page as containing only
1509 * blocksize aligned chunks: 1507 * blocksize aligned chunks:
1510 * t->length = (slen & ~(blocksize - 1)); 1508 * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
1511 * and increase number of bytes to be processed 1509 * and increase number of bytes to be processed
1512 * in next chunk: 1510 * in next chunk:
1513 * nbytes += diff; 1511 * nbytes += diff;
@@ -1544,7 +1542,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
1544 1542
1545 kunmap_atomic(daddr, KM_SOFTIRQ0); 1543 kunmap_atomic(daddr, KM_SOFTIRQ0);
1546 } else { 1544 } else {
1547 nbytes -= src->length; 1545 nbytes -= min(src->length, nbytes);
1548 idx++; 1546 idx++;
1549 } 1547 }
1550 1548
@@ -1563,14 +1561,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1563 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); 1561 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1564 struct hifn_device *dev = ctx->dev; 1562 struct hifn_device *dev = ctx->dev;
1565 struct page *spage, *dpage; 1563 struct page *spage, *dpage;
1566 unsigned long soff, doff, flags; 1564 unsigned long soff, doff, dlen, flags;
1567 unsigned int nbytes = req->nbytes, idx = 0, len; 1565 unsigned int nbytes = req->nbytes, idx = 0, len;
1568 int err = -EINVAL, sg_num; 1566 int err = -EINVAL, sg_num;
1569 struct scatterlist *src, *dst, *t; 1567 struct scatterlist *src, *dst, *t;
1570 unsigned blocksize =
1571 crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
1572 unsigned alignmask =
1573 crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
1574 1568
1575 if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB) 1569 if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
1576 goto err_out_exit; 1570 goto err_out_exit;
@@ -1578,17 +1572,14 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1578 ctx->walk.flags = 0; 1572 ctx->walk.flags = 0;
1579 1573
1580 while (nbytes) { 1574 while (nbytes) {
1581 src = &req->src[idx];
1582 dst = &req->dst[idx]; 1575 dst = &req->dst[idx];
1576 dlen = min(dst->length, nbytes);
1583 1577
1584 if (src->length & (blocksize - 1) || 1578 if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
1585 src->offset & (alignmask - 1) || 1579 !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
1586 dst->length & (blocksize - 1) ||
1587 dst->offset & (alignmask - 1)) {
1588 ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; 1580 ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
1589 }
1590 1581
1591 nbytes -= src->length; 1582 nbytes -= dlen;
1592 idx++; 1583 idx++;
1593 } 1584 }
1594 1585
@@ -1602,7 +1593,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1602 idx = 0; 1593 idx = 0;
1603 1594
1604 sg_num = ablkcipher_walk(req, &ctx->walk); 1595 sg_num = ablkcipher_walk(req, &ctx->walk);
1605 1596 if (sg_num < 0) {
1597 err = sg_num;
1598 goto err_out_exit;
1599 }
1606 atomic_set(&ctx->sg_num, sg_num); 1600 atomic_set(&ctx->sg_num, sg_num);
1607 1601
1608 spin_lock_irqsave(&dev->lock, flags); 1602 spin_lock_irqsave(&dev->lock, flags);
@@ -1640,7 +1634,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1640 if (err) 1634 if (err)
1641 goto err_out; 1635 goto err_out;
1642 1636
1643 nbytes -= len; 1637 nbytes -= min(len, nbytes);
1644 } 1638 }
1645 1639
1646 dev->active = HIFN_DEFAULT_ACTIVE_NUM; 1640 dev->active = HIFN_DEFAULT_ACTIVE_NUM;
@@ -1651,7 +1645,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1651err_out: 1645err_out:
1652 spin_unlock_irqrestore(&dev->lock, flags); 1646 spin_unlock_irqrestore(&dev->lock, flags);
1653err_out_exit: 1647err_out_exit:
1654 if (err && printk_ratelimit()) 1648 if (err)
1655 dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " 1649 dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
1656 "type: %u, err: %d.\n", 1650 "type: %u, err: %d.\n",
1657 dev->name, ctx->iv, ctx->ivsize, 1651 dev->name, ctx->iv, ctx->ivsize,
@@ -1745,8 +1739,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
1745 return -EINVAL; 1739 return -EINVAL;
1746 1740
1747 while (size) { 1741 while (size) {
1748 1742 copy = min(srest, min(dst->length, size));
1749 copy = min(dst->length, srest);
1750 1743
1751 daddr = kmap_atomic(sg_page(dst), KM_IRQ0); 1744 daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
1752 memcpy(daddr + dst->offset + offset, saddr, copy); 1745 memcpy(daddr + dst->offset + offset, saddr, copy);
@@ -1803,7 +1796,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
1803 sg_page(dst), dst->length, nbytes); 1796 sg_page(dst), dst->length, nbytes);
1804 1797
1805 if (!t->length) { 1798 if (!t->length) {
1806 nbytes -= dst->length; 1799 nbytes -= min(dst->length, nbytes);
1807 idx++; 1800 idx++;
1808 continue; 1801 continue;
1809 } 1802 }
@@ -2202,9 +2195,9 @@ static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
2202 return err; 2195 return err;
2203 2196
2204 if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen) 2197 if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
2205 err = hifn_process_queue(dev); 2198 hifn_process_queue(dev);
2206 2199
2207 return err; 2200 return -EINPROGRESS;
2208} 2201}
2209 2202
2210/* 2203/*
@@ -2364,7 +2357,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2364 * 3DES ECB, CBC, CFB and OFB modes. 2357 * 3DES ECB, CBC, CFB and OFB modes.
2365 */ 2358 */
2366 { 2359 {
2367 .name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, 2360 .name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
2368 .ablkcipher = { 2361 .ablkcipher = {
2369 .min_keysize = HIFN_3DES_KEY_LENGTH, 2362 .min_keysize = HIFN_3DES_KEY_LENGTH,
2370 .max_keysize = HIFN_3DES_KEY_LENGTH, 2363 .max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2374,7 +2367,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2374 }, 2367 },
2375 }, 2368 },
2376 { 2369 {
2377 .name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, 2370 .name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
2378 .ablkcipher = { 2371 .ablkcipher = {
2379 .min_keysize = HIFN_3DES_KEY_LENGTH, 2372 .min_keysize = HIFN_3DES_KEY_LENGTH,
2380 .max_keysize = HIFN_3DES_KEY_LENGTH, 2373 .max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2384,8 +2377,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2384 }, 2377 },
2385 }, 2378 },
2386 { 2379 {
2387 .name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, 2380 .name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
2388 .ablkcipher = { 2381 .ablkcipher = {
2382 .ivsize = HIFN_IV_LENGTH,
2389 .min_keysize = HIFN_3DES_KEY_LENGTH, 2383 .min_keysize = HIFN_3DES_KEY_LENGTH,
2390 .max_keysize = HIFN_3DES_KEY_LENGTH, 2384 .max_keysize = HIFN_3DES_KEY_LENGTH,
2391 .setkey = hifn_setkey, 2385 .setkey = hifn_setkey,
@@ -2394,7 +2388,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2394 }, 2388 },
2395 }, 2389 },
2396 { 2390 {
2397 .name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, 2391 .name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
2398 .ablkcipher = { 2392 .ablkcipher = {
2399 .min_keysize = HIFN_3DES_KEY_LENGTH, 2393 .min_keysize = HIFN_3DES_KEY_LENGTH,
2400 .max_keysize = HIFN_3DES_KEY_LENGTH, 2394 .max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2408,7 +2402,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2408 * DES ECB, CBC, CFB and OFB modes. 2402 * DES ECB, CBC, CFB and OFB modes.
2409 */ 2403 */
2410 { 2404 {
2411 .name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8, 2405 .name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
2412 .ablkcipher = { 2406 .ablkcipher = {
2413 .min_keysize = HIFN_DES_KEY_LENGTH, 2407 .min_keysize = HIFN_DES_KEY_LENGTH,
2414 .max_keysize = HIFN_DES_KEY_LENGTH, 2408 .max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2418,7 +2412,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2418 }, 2412 },
2419 }, 2413 },
2420 { 2414 {
2421 .name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8, 2415 .name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
2422 .ablkcipher = { 2416 .ablkcipher = {
2423 .min_keysize = HIFN_DES_KEY_LENGTH, 2417 .min_keysize = HIFN_DES_KEY_LENGTH,
2424 .max_keysize = HIFN_DES_KEY_LENGTH, 2418 .max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2428,8 +2422,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2428 }, 2422 },
2429 }, 2423 },
2430 { 2424 {
2431 .name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8, 2425 .name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
2432 .ablkcipher = { 2426 .ablkcipher = {
2427 .ivsize = HIFN_IV_LENGTH,
2433 .min_keysize = HIFN_DES_KEY_LENGTH, 2428 .min_keysize = HIFN_DES_KEY_LENGTH,
2434 .max_keysize = HIFN_DES_KEY_LENGTH, 2429 .max_keysize = HIFN_DES_KEY_LENGTH,
2435 .setkey = hifn_setkey, 2430 .setkey = hifn_setkey,
@@ -2438,7 +2433,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2438 }, 2433 },
2439 }, 2434 },
2440 { 2435 {
2441 .name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8, 2436 .name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
2442 .ablkcipher = { 2437 .ablkcipher = {
2443 .min_keysize = HIFN_DES_KEY_LENGTH, 2438 .min_keysize = HIFN_DES_KEY_LENGTH,
2444 .max_keysize = HIFN_DES_KEY_LENGTH, 2439 .max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2452,7 +2447,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2452 * AES ECB, CBC, CFB and OFB modes. 2447 * AES ECB, CBC, CFB and OFB modes.
2453 */ 2448 */
2454 { 2449 {
2455 .name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16, 2450 .name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
2456 .ablkcipher = { 2451 .ablkcipher = {
2457 .min_keysize = AES_MIN_KEY_SIZE, 2452 .min_keysize = AES_MIN_KEY_SIZE,
2458 .max_keysize = AES_MAX_KEY_SIZE, 2453 .max_keysize = AES_MAX_KEY_SIZE,
@@ -2462,8 +2457,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2462 }, 2457 },
2463 }, 2458 },
2464 { 2459 {
2465 .name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16, 2460 .name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
2466 .ablkcipher = { 2461 .ablkcipher = {
2462 .ivsize = HIFN_AES_IV_LENGTH,
2467 .min_keysize = AES_MIN_KEY_SIZE, 2463 .min_keysize = AES_MIN_KEY_SIZE,
2468 .max_keysize = AES_MAX_KEY_SIZE, 2464 .max_keysize = AES_MAX_KEY_SIZE,
2469 .setkey = hifn_setkey, 2465 .setkey = hifn_setkey,
@@ -2472,7 +2468,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2472 }, 2468 },
2473 }, 2469 },
2474 { 2470 {
2475 .name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16, 2471 .name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
2476 .ablkcipher = { 2472 .ablkcipher = {
2477 .min_keysize = AES_MIN_KEY_SIZE, 2473 .min_keysize = AES_MIN_KEY_SIZE,
2478 .max_keysize = AES_MAX_KEY_SIZE, 2474 .max_keysize = AES_MAX_KEY_SIZE,
@@ -2482,7 +2478,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2482 }, 2478 },
2483 }, 2479 },
2484 { 2480 {
2485 .name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16, 2481 .name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
2486 .ablkcipher = { 2482 .ablkcipher = {
2487 .min_keysize = AES_MIN_KEY_SIZE, 2483 .min_keysize = AES_MIN_KEY_SIZE,
2488 .max_keysize = AES_MAX_KEY_SIZE, 2484 .max_keysize = AES_MAX_KEY_SIZE,
@@ -2514,15 +2510,14 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
2514 return -ENOMEM; 2510 return -ENOMEM;
2515 2511
2516 snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name); 2512 snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
2517 snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name); 2513 snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
2514 t->drv_name, dev->name);
2518 2515
2519 alg->alg.cra_priority = 300; 2516 alg->alg.cra_priority = 300;
2520 alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 2517 alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
2521 alg->alg.cra_blocksize = t->bsize; 2518 alg->alg.cra_blocksize = t->bsize;
2522 alg->alg.cra_ctxsize = sizeof(struct hifn_context); 2519 alg->alg.cra_ctxsize = sizeof(struct hifn_context);
2523 alg->alg.cra_alignmask = 15; 2520 alg->alg.cra_alignmask = 0;
2524 if (t->bsize == 8)
2525 alg->alg.cra_alignmask = 3;
2526 alg->alg.cra_type = &crypto_ablkcipher_type; 2521 alg->alg.cra_type = &crypto_ablkcipher_type;
2527 alg->alg.cra_module = THIS_MODULE; 2522 alg->alg.cra_module = THIS_MODULE;
2528 alg->alg.cra_u.ablkcipher = t->ablkcipher; 2523 alg->alg.cra_u.ablkcipher = t->ablkcipher;
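
The hifn_795x changes above drop the per-transform blocksize/alignmask tests (and set cra_alignmask to 0) in favour of one hardware constraint: destination scatterlist entries must be 4-byte aligned in offset and length (HIFN_D_DST_DALIGN); anything else is bounced through the driver's cache pages by ablkcipher_walk(). The test the patch applies reduces to roughly the following, where hifn_dst_misaligned is an illustrative helper name, not a function added by the patch:

	#define HIFN_D_DST_DALIGN	4

	/* True if this dst entry cannot be handed to the engine directly
	 * and must go through the walk cache instead. */
	static inline int hifn_dst_misaligned(struct scatterlist *dst,
					      unsigned int len)
	{
		return !IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
		       !IS_ALIGNED(len, HIFN_D_DST_DALIGN);
	}
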
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
new file mode 100644
index 000000000000..42a107fe9233
--- /dev/null
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -0,0 +1,1506 @@
1/*
2 * Intel IXP4xx NPE-C crypto driver
3 *
4 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/platform_device.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmapool.h>
15#include <linux/crypto.h>
16#include <linux/kernel.h>
17#include <linux/rtnetlink.h>
18#include <linux/interrupt.h>
19#include <linux/spinlock.h>
20
21#include <crypto/ctr.h>
22#include <crypto/des.h>
23#include <crypto/aes.h>
24#include <crypto/sha.h>
25#include <crypto/algapi.h>
26#include <crypto/aead.h>
27#include <crypto/authenc.h>
28#include <crypto/scatterwalk.h>
29
30#include <asm/arch/npe.h>
31#include <asm/arch/qmgr.h>
32
33#define MAX_KEYLEN 32
34
35/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
36#define NPE_CTX_LEN 80
37#define AES_BLOCK128 16
38
39#define NPE_OP_HASH_VERIFY 0x01
40#define NPE_OP_CCM_ENABLE 0x04
41#define NPE_OP_CRYPT_ENABLE 0x08
42#define NPE_OP_HASH_ENABLE 0x10
43#define NPE_OP_NOT_IN_PLACE 0x20
44#define NPE_OP_HMAC_DISABLE 0x40
45#define NPE_OP_CRYPT_ENCRYPT 0x80
46
47#define NPE_OP_CCM_GEN_MIC 0xcc
48#define NPE_OP_HASH_GEN_ICV 0x50
49#define NPE_OP_ENC_GEN_KEY 0xc9
50
51#define MOD_ECB 0x0000
52#define MOD_CTR 0x1000
53#define MOD_CBC_ENC 0x2000
54#define MOD_CBC_DEC 0x3000
55#define MOD_CCM_ENC 0x4000
56#define MOD_CCM_DEC 0x5000
57
58#define KEYLEN_128 4
59#define KEYLEN_192 6
60#define KEYLEN_256 8
61
62#define CIPH_DECR 0x0000
63#define CIPH_ENCR 0x0400
64
65#define MOD_DES 0x0000
66#define MOD_TDEA2 0x0100
67#define MOD_3DES 0x0200
68#define MOD_AES 0x0800
69#define MOD_AES128 (0x0800 | KEYLEN_128)
70#define MOD_AES192 (0x0900 | KEYLEN_192)
71#define MOD_AES256 (0x0a00 | KEYLEN_256)
72
73#define MAX_IVLEN 16
74#define NPE_ID 2 /* NPE C */
75#define NPE_QLEN 16
76/* Space for registering when the first
77 * NPE_QLEN crypt_ctl are busy */
78#define NPE_QLEN_TOTAL 64
79
80#define SEND_QID 29
81#define RECV_QID 30
82
83#define CTL_FLAG_UNUSED 0x0000
84#define CTL_FLAG_USED 0x1000
85#define CTL_FLAG_PERFORM_ABLK 0x0001
86#define CTL_FLAG_GEN_ICV 0x0002
87#define CTL_FLAG_GEN_REVAES 0x0004
88#define CTL_FLAG_PERFORM_AEAD 0x0008
89#define CTL_FLAG_MASK 0x000f
90
91#define HMAC_IPAD_VALUE 0x36
92#define HMAC_OPAD_VALUE 0x5C
93#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
94
95#define MD5_DIGEST_SIZE 16
96
97struct buffer_desc {
98 u32 phys_next;
99 u16 buf_len;
100 u16 pkt_len;
101 u32 phys_addr;
102 u32 __reserved[4];
103 struct buffer_desc *next;
104};
105
106struct crypt_ctl {
107 u8 mode; /* NPE_OP_* operation mode */
108 u8 init_len;
109 u16 reserved;
110 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
111 u32 icv_rev_aes; /* icv or rev aes */
112 u32 src_buf;
113 u32 dst_buf;
114 u16 auth_offs; /* Authentication start offset */
115 u16 auth_len; /* Authentication data length */
116 u16 crypt_offs; /* Cryption start offset */
117 u16 crypt_len; /* Cryption data length */
118 u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
119 u32 crypto_ctx; /* NPE Crypto Param structure address */
120
121 /* Used by Host: 4*4 bytes*/
122 unsigned ctl_flags;
123 union {
124 struct ablkcipher_request *ablk_req;
125 struct aead_request *aead_req;
126 struct crypto_tfm *tfm;
127 } data;
128 struct buffer_desc *regist_buf;
129 u8 *regist_ptr;
130};
131
132struct ablk_ctx {
133 struct buffer_desc *src;
134 struct buffer_desc *dst;
135 unsigned src_nents;
136 unsigned dst_nents;
137};
138
139struct aead_ctx {
140 struct buffer_desc *buffer;
141 unsigned short assoc_nents;
142 unsigned short src_nents;
143 struct scatterlist ivlist;
144 /* used when the hmac is not on one sg entry */
145 u8 *hmac_virt;
146 int encrypt;
147};
148
149struct ix_hash_algo {
150 u32 cfgword;
151 unsigned char *icv;
152};
153
154struct ix_sa_dir {
155 unsigned char *npe_ctx;
156 dma_addr_t npe_ctx_phys;
157 int npe_ctx_idx;
158 u8 npe_mode;
159};
160
161struct ixp_ctx {
162 struct ix_sa_dir encrypt;
163 struct ix_sa_dir decrypt;
164 int authkey_len;
165 u8 authkey[MAX_KEYLEN];
166 int enckey_len;
167 u8 enckey[MAX_KEYLEN];
168 u8 salt[MAX_IVLEN];
169 u8 nonce[CTR_RFC3686_NONCE_SIZE];
170 unsigned salted;
171 atomic_t configuring;
172 struct completion completion;
173};
174
175struct ixp_alg {
176 struct crypto_alg crypto;
177 const struct ix_hash_algo *hash;
178 u32 cfg_enc;
179 u32 cfg_dec;
180
181 int registered;
182};
183
184static const struct ix_hash_algo hash_alg_md5 = {
185 .cfgword = 0xAA010004,
186 .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
187 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
188};
189static const struct ix_hash_algo hash_alg_sha1 = {
190 .cfgword = 0x00000005,
191 .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
192 "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
193};
194
195static struct npe *npe_c;
196static struct dma_pool *buffer_pool = NULL;
197static struct dma_pool *ctx_pool = NULL;
198
199static struct crypt_ctl *crypt_virt = NULL;
200static dma_addr_t crypt_phys;
201
202static int support_aes = 1;
203
204static void dev_release(struct device *dev)
205{
206 return;
207}
208
209#define DRIVER_NAME "ixp4xx_crypto"
210static struct platform_device pseudo_dev = {
211 .name = DRIVER_NAME,
212 .id = 0,
213 .num_resources = 0,
214 .dev = {
215 .coherent_dma_mask = DMA_32BIT_MASK,
216 .release = dev_release,
217 }
218};
219
220static struct device *dev = &pseudo_dev.dev;
221
222static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
223{
224 return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
225}
226
227static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
228{
229 return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
230}
231
232static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
233{
234 return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
235}
236
237static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
238{
239 return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
240}
241
242static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
243{
244 return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
245}
246
247static int setup_crypt_desc(void)
248{
249 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
250 crypt_virt = dma_alloc_coherent(dev,
251 NPE_QLEN * sizeof(struct crypt_ctl),
252 &crypt_phys, GFP_KERNEL);
253 if (!crypt_virt)
254 return -ENOMEM;
255 memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
256 return 0;
257}
258
259static spinlock_t desc_lock;
260static struct crypt_ctl *get_crypt_desc(void)
261{
262 int i;
263 static int idx = 0;
264 unsigned long flags;
265
266 spin_lock_irqsave(&desc_lock, flags);
267
268 if (unlikely(!crypt_virt))
269 setup_crypt_desc();
270 if (unlikely(!crypt_virt)) {
271 spin_unlock_irqrestore(&desc_lock, flags);
272 return NULL;
273 }
274 i = idx;
275 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
276 if (++idx >= NPE_QLEN)
277 idx = 0;
278 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
279 spin_unlock_irqrestore(&desc_lock, flags);
280 return crypt_virt +i;
281 } else {
282 spin_unlock_irqrestore(&desc_lock, flags);
283 return NULL;
284 }
285}
286
287static spinlock_t emerg_lock;
288static struct crypt_ctl *get_crypt_desc_emerg(void)
289{
290 int i;
291 static int idx = NPE_QLEN;
292 struct crypt_ctl *desc;
293 unsigned long flags;
294
295 desc = get_crypt_desc();
296 if (desc)
297 return desc;
298 if (unlikely(!crypt_virt))
299 return NULL;
300
301 spin_lock_irqsave(&emerg_lock, flags);
302 i = idx;
303 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
304 if (++idx >= NPE_QLEN_TOTAL)
305 idx = NPE_QLEN;
306 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
307 spin_unlock_irqrestore(&emerg_lock, flags);
308 return crypt_virt +i;
309 } else {
310 spin_unlock_irqrestore(&emerg_lock, flags);
311 return NULL;
312 }
313}
314
315static void free_buf_chain(struct buffer_desc *buf, u32 phys)
316{
317 while (buf) {
318 struct buffer_desc *buf1;
319 u32 phys1;
320
321 buf1 = buf->next;
322 phys1 = buf->phys_next;
323 dma_pool_free(buffer_pool, buf, phys);
324 buf = buf1;
325 phys = phys1;
326 }
327}
328
329static struct tasklet_struct crypto_done_tasklet;
330
331static void finish_scattered_hmac(struct crypt_ctl *crypt)
332{
333 struct aead_request *req = crypt->data.aead_req;
334 struct aead_ctx *req_ctx = aead_request_ctx(req);
335 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
336 int authsize = crypto_aead_authsize(tfm);
337 int decryptlen = req->cryptlen - authsize;
338
339 if (req_ctx->encrypt) {
340 scatterwalk_map_and_copy(req_ctx->hmac_virt,
341 req->src, decryptlen, authsize, 1);
342 }
343 dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
344}
345
346static void one_packet(dma_addr_t phys)
347{
348 struct crypt_ctl *crypt;
349 struct ixp_ctx *ctx;
350 int failed;
351 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
352
353 failed = phys & 0x1 ? -EBADMSG : 0;
354 phys &= ~0x3;
355 crypt = crypt_phys2virt(phys);
356
357 switch (crypt->ctl_flags & CTL_FLAG_MASK) {
358 case CTL_FLAG_PERFORM_AEAD: {
359 struct aead_request *req = crypt->data.aead_req;
360 struct aead_ctx *req_ctx = aead_request_ctx(req);
361 dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
362 DMA_TO_DEVICE);
363 dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
364 dma_unmap_sg(dev, req->src, req_ctx->src_nents,
365 DMA_BIDIRECTIONAL);
366
367 free_buf_chain(req_ctx->buffer, crypt->src_buf);
368 if (req_ctx->hmac_virt) {
369 finish_scattered_hmac(crypt);
370 }
371 req->base.complete(&req->base, failed);
372 break;
373 }
374 case CTL_FLAG_PERFORM_ABLK: {
375 struct ablkcipher_request *req = crypt->data.ablk_req;
376 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
377 int nents;
378 if (req_ctx->dst) {
379 nents = req_ctx->dst_nents;
380 dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
381 free_buf_chain(req_ctx->dst, crypt->dst_buf);
382 src_direction = DMA_TO_DEVICE;
383 }
384 nents = req_ctx->src_nents;
385 dma_unmap_sg(dev, req->src, nents, src_direction);
386 free_buf_chain(req_ctx->src, crypt->src_buf);
387 req->base.complete(&req->base, failed);
388 break;
389 }
390 case CTL_FLAG_GEN_ICV:
391 ctx = crypto_tfm_ctx(crypt->data.tfm);
392 dma_pool_free(ctx_pool, crypt->regist_ptr,
393 crypt->regist_buf->phys_addr);
394 dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
395 if (atomic_dec_and_test(&ctx->configuring))
396 complete(&ctx->completion);
397 break;
398 case CTL_FLAG_GEN_REVAES:
399 ctx = crypto_tfm_ctx(crypt->data.tfm);
400 *(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
401 if (atomic_dec_and_test(&ctx->configuring))
402 complete(&ctx->completion);
403 break;
404 default:
405 BUG();
406 }
407 crypt->ctl_flags = CTL_FLAG_UNUSED;
408}
409
410static void irqhandler(void *_unused)
411{
412 tasklet_schedule(&crypto_done_tasklet);
413}
414
415static void crypto_done_action(unsigned long arg)
416{
417 int i;
418
419 for(i=0; i<4; i++) {
420 dma_addr_t phys = qmgr_get_entry(RECV_QID);
421 if (!phys)
422 return;
423 one_packet(phys);
424 }
425 tasklet_schedule(&crypto_done_tasklet);
426}
427
428static int init_ixp_crypto(void)
429{
430 int ret = -ENODEV;
431
432 if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
433 IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
434 printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
435 return ret;
436 }
437 npe_c = npe_request(NPE_ID);
438 if (!npe_c)
439 return ret;
440
441 if (!npe_running(npe_c)) {
442 npe_load_firmware(npe_c, npe_name(npe_c), dev);
443 }
444
445 /* buffer_pool will sometimes also be used to store the hmac,
446 * so make sure it is large enough
447 */
448 BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
449 buffer_pool = dma_pool_create("buffer", dev,
450 sizeof(struct buffer_desc), 32, 0);
451 ret = -ENOMEM;
452 if (!buffer_pool) {
453 goto err;
454 }
455 ctx_pool = dma_pool_create("context", dev,
456 NPE_CTX_LEN, 16, 0);
457 if (!ctx_pool) {
458 goto err;
459 }
460 ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
461 if (ret)
462 goto err;
463 ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
464 if (ret) {
465 qmgr_release_queue(SEND_QID);
466 goto err;
467 }
468 qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
469 tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
470
471 qmgr_enable_irq(RECV_QID);
472 return 0;
473err:
474 if (ctx_pool)
475 dma_pool_destroy(ctx_pool);
476 if (buffer_pool)
477 dma_pool_destroy(buffer_pool);
478 npe_release(npe_c);
479 return ret;
480}
481
482static void release_ixp_crypto(void)
483{
484 qmgr_disable_irq(RECV_QID);
485 tasklet_kill(&crypto_done_tasklet);
486
487 qmgr_release_queue(SEND_QID);
488 qmgr_release_queue(RECV_QID);
489
490 dma_pool_destroy(ctx_pool);
491 dma_pool_destroy(buffer_pool);
492
493 npe_release(npe_c);
494
495 if (crypt_virt) {
496 dma_free_coherent(dev,
497 NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
498 crypt_virt, crypt_phys);
499 }
500 return;
501}
502
503static void reset_sa_dir(struct ix_sa_dir *dir)
504{
505 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
506 dir->npe_ctx_idx = 0;
507 dir->npe_mode = 0;
508}
509
510static int init_sa_dir(struct ix_sa_dir *dir)
511{
512 dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
513 if (!dir->npe_ctx) {
514 return -ENOMEM;
515 }
516 reset_sa_dir(dir);
517 return 0;
518}
519
520static void free_sa_dir(struct ix_sa_dir *dir)
521{
522 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
523 dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
524}
525
526static int init_tfm(struct crypto_tfm *tfm)
527{
528 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
529 int ret;
530
531 atomic_set(&ctx->configuring, 0);
532 ret = init_sa_dir(&ctx->encrypt);
533 if (ret)
534 return ret;
535 ret = init_sa_dir(&ctx->decrypt);
536 if (ret) {
537 free_sa_dir(&ctx->encrypt);
538 }
539 return ret;
540}
541
542static int init_tfm_ablk(struct crypto_tfm *tfm)
543{
544 tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
545 return init_tfm(tfm);
546}
547
548static int init_tfm_aead(struct crypto_tfm *tfm)
549{
550 tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
551 return init_tfm(tfm);
552}
553
554static void exit_tfm(struct crypto_tfm *tfm)
555{
556 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
557 free_sa_dir(&ctx->encrypt);
558 free_sa_dir(&ctx->decrypt);
559}
560
561static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
562 int init_len, u32 ctx_addr, const u8 *key, int key_len)
563{
564 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
565 struct crypt_ctl *crypt;
566 struct buffer_desc *buf;
567 int i;
568 u8 *pad;
569 u32 pad_phys, buf_phys;
570
571 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
572 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
573 if (!pad)
574 return -ENOMEM;
575 buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
576 if (!buf) {
577 dma_pool_free(ctx_pool, pad, pad_phys);
578 return -ENOMEM;
579 }
580 crypt = get_crypt_desc_emerg();
581 if (!crypt) {
582 dma_pool_free(ctx_pool, pad, pad_phys);
583 dma_pool_free(buffer_pool, buf, buf_phys);
584 return -EAGAIN;
585 }
586
587 memcpy(pad, key, key_len);
588 memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
589 for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
590 pad[i] ^= xpad;
591 }
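	/* Illustrative note (not part of this patch): this is the standard
	 * HMAC key preprocessing step, i.e. the key is zero-padded to the
	 * hash block length and XORed with ipad (0x36..36) or opad (0x5c..5c)
	 * depending on the xpad argument; the NPE then hashes this one block
	 * to precompute the inner/outer chaining variable written to 'target'.
	 */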
592
593 crypt->data.tfm = tfm;
594 crypt->regist_ptr = pad;
595 crypt->regist_buf = buf;
596
597 crypt->auth_offs = 0;
598 crypt->auth_len = HMAC_PAD_BLOCKLEN;
599 crypt->crypto_ctx = ctx_addr;
600 crypt->src_buf = buf_phys;
601 crypt->icv_rev_aes = target;
602 crypt->mode = NPE_OP_HASH_GEN_ICV;
603 crypt->init_len = init_len;
604 crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
605
606 buf->next = 0;
607 buf->buf_len = HMAC_PAD_BLOCKLEN;
608 buf->pkt_len = 0;
609 buf->phys_addr = pad_phys;
610
611 atomic_inc(&ctx->configuring);
612 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
613 BUG_ON(qmgr_stat_overflow(SEND_QID));
614 return 0;
615}
616
617static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
618 const u8 *key, int key_len, unsigned digest_len)
619{
620 u32 itarget, otarget, npe_ctx_addr;
621 unsigned char *cinfo;
622 int init_len, ret = 0;
623 u32 cfgword;
624 struct ix_sa_dir *dir;
625 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
626 const struct ix_hash_algo *algo;
627
628 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
629 cinfo = dir->npe_ctx + dir->npe_ctx_idx;
630 algo = ix_hash(tfm);
631
632 /* write cfg word to cryptinfo */
633 cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
634 *(u32*)cinfo = cpu_to_be32(cfgword);
635 cinfo += sizeof(cfgword);
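	/* Illustrative arithmetic (not part of this patch): with HMAC-SHA1
	 * truncated to authsize = 12 bytes, the shift above gives
	 *	12 << 6 == 768 == (12 / 4) << 8
	 * so the ICV length in 32-bit words lands at bit 8 of cfgword, which
	 * is what the "(authsize/4) << 8" comment alongside it means.
	 */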
636
637 /* write ICV to cryptinfo */
638 memcpy(cinfo, algo->icv, digest_len);
639 cinfo += digest_len;
640
641 itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
642 + sizeof(algo->cfgword);
643 otarget = itarget + digest_len;
644 init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
645 npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
646
647 dir->npe_ctx_idx += init_len;
648 dir->npe_mode |= NPE_OP_HASH_ENABLE;
649
650 if (!encrypt)
651 dir->npe_mode |= NPE_OP_HASH_VERIFY;
652
653 ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
654 init_len, npe_ctx_addr, key, key_len);
655 if (ret)
656 return ret;
657 return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
658 init_len, npe_ctx_addr, key, key_len);
659}
660
661static int gen_rev_aes_key(struct crypto_tfm *tfm)
662{
663 struct crypt_ctl *crypt;
664 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
665 struct ix_sa_dir *dir = &ctx->decrypt;
666
667 crypt = get_crypt_desc_emerg();
668 if (!crypt) {
669 return -EAGAIN;
670 }
671 *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
672
673 crypt->data.tfm = tfm;
674 crypt->crypt_offs = 0;
675 crypt->crypt_len = AES_BLOCK128;
676 crypt->src_buf = 0;
677 crypt->crypto_ctx = dir->npe_ctx_phys;
678 crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
679 crypt->mode = NPE_OP_ENC_GEN_KEY;
680 crypt->init_len = dir->npe_ctx_idx;
681 crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
682
683 atomic_inc(&ctx->configuring);
684 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
685 BUG_ON(qmgr_stat_overflow(SEND_QID));
686 return 0;
687}
688
689static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
690 const u8 *key, int key_len)
691{
692 u8 *cinfo;
693 u32 cipher_cfg;
694 u32 keylen_cfg = 0;
695 struct ix_sa_dir *dir;
696 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
697 u32 *flags = &tfm->crt_flags;
698
699 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
700 cinfo = dir->npe_ctx;
701
702 if (encrypt) {
703 cipher_cfg = cipher_cfg_enc(tfm);
704 dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
705 } else {
706 cipher_cfg = cipher_cfg_dec(tfm);
707 }
708 if (cipher_cfg & MOD_AES) {
709 switch (key_len) {
710 case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
711 case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
712 case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
713 default:
714 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
715 return -EINVAL;
716 }
717 cipher_cfg |= keylen_cfg;
718 } else if (cipher_cfg & MOD_3DES) {
719 const u32 *K = (const u32 *)key;
720 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
721 !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
722 {
723 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
724 return -EINVAL;
725 }
726 } else {
727 u32 tmp[DES_EXPKEY_WORDS];
728 if (des_ekey(tmp, key) == 0) {
729 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
730 }
731 }
732 /* write cfg word to cryptinfo */
733 *(u32*)cinfo = cpu_to_be32(cipher_cfg);
734 cinfo += sizeof(cipher_cfg);
735
736 /* write cipher key to cryptinfo */
737 memcpy(cinfo, key, key_len);
738 /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
739 if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
740 memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
741 key_len = DES3_EDE_KEY_SIZE;
742 }
743 dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
744 dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
745 if ((cipher_cfg & MOD_AES) && !encrypt) {
746 return gen_rev_aes_key(tfm);
747 }
748 return 0;
749}
750
751static int count_sg(struct scatterlist *sg, int nbytes)
752{
753 int i;
754 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
755 nbytes -= sg->length;
756 return i;
757}
758
759static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
760 unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
761{
762 int nents = 0;
763
764 while (nbytes > 0) {
765 struct buffer_desc *next_buf;
766 u32 next_buf_phys;
767 unsigned len = min(nbytes, sg_dma_len(sg));
768
769 nents++;
770 nbytes -= len;
771 if (!buf->phys_addr) {
772 buf->phys_addr = sg_dma_address(sg);
773 buf->buf_len = len;
774 buf->next = NULL;
775 buf->phys_next = 0;
776 goto next;
777 }
778 /* Two consecutive chunks on one page may be handled by the old
779 * buffer descriptor, increased by the length of the new one
780 */
781 if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
782 buf->buf_len += len;
783 goto next;
784 }
785 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
786 if (!next_buf)
787 return NULL;
788 buf->next = next_buf;
789 buf->phys_next = next_buf_phys;
790
791 buf = next_buf;
792 buf->next = NULL;
793 buf->phys_next = 0;
794 buf->phys_addr = sg_dma_address(sg);
795 buf->buf_len = len;
796next:
797 if (nbytes > 0) {
798 sg = sg_next(sg);
799 }
800 }
801 return buf;
802}
803
804static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
805 unsigned int key_len)
806{
807 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
808 u32 *flags = &tfm->base.crt_flags;
809 int ret;
810
811 init_completion(&ctx->completion);
812 atomic_inc(&ctx->configuring);
813
814 reset_sa_dir(&ctx->encrypt);
815 reset_sa_dir(&ctx->decrypt);
816
817 ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
818 ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
819
820 ret = setup_cipher(&tfm->base, 0, key, key_len);
821 if (ret)
822 goto out;
823 ret = setup_cipher(&tfm->base, 1, key, key_len);
824 if (ret)
825 goto out;
826
827 if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
828 if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
829 ret = -EINVAL;
830 } else {
831 *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
832 }
833 }
834out:
835 if (!atomic_dec_and_test(&ctx->configuring))
836 wait_for_completion(&ctx->completion);
837 return ret;
838}
839
840static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
841 unsigned int key_len)
842{
843 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
844
845 /* the nonce is stored in bytes at end of key */
846 if (key_len < CTR_RFC3686_NONCE_SIZE)
847 return -EINVAL;
848
849 memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
850 CTR_RFC3686_NONCE_SIZE);
851
852 key_len -= CTR_RFC3686_NONCE_SIZE;
853 return ablk_setkey(tfm, key, key_len);
854}
855
856static int ablk_perform(struct ablkcipher_request *req, int encrypt)
857{
858 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
859 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
860 unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
861 int ret = -ENOMEM;
862 struct ix_sa_dir *dir;
863 struct crypt_ctl *crypt;
864 unsigned int nbytes = req->nbytes, nents;
865 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
866 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
867 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
868 GFP_KERNEL : GFP_ATOMIC;
869
870 if (qmgr_stat_full(SEND_QID))
871 return -EAGAIN;
872 if (atomic_read(&ctx->configuring))
873 return -EAGAIN;
874
875 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
876
877 crypt = get_crypt_desc();
878 if (!crypt)
879 return ret;
880
881 crypt->data.ablk_req = req;
882 crypt->crypto_ctx = dir->npe_ctx_phys;
883 crypt->mode = dir->npe_mode;
884 crypt->init_len = dir->npe_ctx_idx;
885
886 crypt->crypt_offs = 0;
887 crypt->crypt_len = nbytes;
888
889 BUG_ON(ivsize && !req->info);
890 memcpy(crypt->iv, req->info, ivsize);
891 if (req->src != req->dst) {
892 crypt->mode |= NPE_OP_NOT_IN_PLACE;
893 nents = count_sg(req->dst, nbytes);
894 /* This was apparently never tested by Intel
895 * with more than one dst buffer. */
896 BUG_ON(nents != 1);
897 req_ctx->dst_nents = nents;
898 dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
899 req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
900 if (!req_ctx->dst)
901 goto unmap_sg_dest;
902 req_ctx->dst->phys_addr = 0;
903 if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
904 goto free_buf_dest;
905 src_direction = DMA_TO_DEVICE;
906 } else {
907 req_ctx->dst = NULL;
908 req_ctx->dst_nents = 0;
909 }
910 nents = count_sg(req->src, nbytes);
911 req_ctx->src_nents = nents;
912 dma_map_sg(dev, req->src, nents, src_direction);
913
914 req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
915 if (!req_ctx->src)
916 goto unmap_sg_src;
917 req_ctx->src->phys_addr = 0;
918 if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
919 goto free_buf_src;
920
921 crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
922 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
923 BUG_ON(qmgr_stat_overflow(SEND_QID));
924 return -EINPROGRESS;
925
926free_buf_src:
927 free_buf_chain(req_ctx->src, crypt->src_buf);
928unmap_sg_src:
929 dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
930free_buf_dest:
931 if (req->src != req->dst) {
932 free_buf_chain(req_ctx->dst, crypt->dst_buf);
933unmap_sg_dest:
934 dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
935 DMA_FROM_DEVICE);
936 }
937 crypt->ctl_flags = CTL_FLAG_UNUSED;
938 return ret;
939}
940
941static int ablk_encrypt(struct ablkcipher_request *req)
942{
943 return ablk_perform(req, 1);
944}
945
946static int ablk_decrypt(struct ablkcipher_request *req)
947{
948 return ablk_perform(req, 0);
949}
950
951static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
952{
953 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
954 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
955 u8 iv[CTR_RFC3686_BLOCK_SIZE];
956 u8 *info = req->info;
957 int ret;
958
959 /* set up counter block */
960 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
961 memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
962
963 /* initialize counter portion of counter block */
964 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
965 cpu_to_be32(1);
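	/* Resulting 16-byte counter block (illustrative layout, per RFC 3686):
	 *	bytes  0..3	nonce saved by ablk_rfc3686_setkey()
	 *	bytes  4..11	per-request IV (req->info)
	 *	bytes 12..15	big-endian block counter, starting at 1
	 */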
966
967 req->info = iv;
968 ret = ablk_perform(req, 1);
969 req->info = info;
970 return ret;
971}
972
973static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
974 unsigned int nbytes)
975{
976 int offset = 0;
977
978 if (!nbytes)
979 return 0;
980
981 for (;;) {
982 if (start < offset + sg->length)
983 break;
984
985 offset += sg->length;
986 sg = sg_next(sg);
987 }
988 return (start + nbytes > offset + sg->length);
989}
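/*
 * Illustrative note (not part of this patch): hmac_inconsistent() answers
 * "do the nbytes bytes starting at offset 'start' cross a scatterlist entry
 * boundary?".  aead_perform() below calls it with start = cryptlen and
 * nbytes = authsize; when the ICV straddles two sg entries it is copied into
 * a separate hmac_virt bounce buffer instead of being addressed in place.
 */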
990
991static int aead_perform(struct aead_request *req, int encrypt,
992 int cryptoffset, int eff_cryptlen, u8 *iv)
993{
994 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
995 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
996 unsigned ivsize = crypto_aead_ivsize(tfm);
997 unsigned authsize = crypto_aead_authsize(tfm);
998 int ret = -ENOMEM;
999 struct ix_sa_dir *dir;
1000 struct crypt_ctl *crypt;
1001 unsigned int cryptlen, nents;
1002 struct buffer_desc *buf;
1003 struct aead_ctx *req_ctx = aead_request_ctx(req);
1004 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1005 GFP_KERNEL : GFP_ATOMIC;
1006
1007 if (qmgr_stat_full(SEND_QID))
1008 return -EAGAIN;
1009 if (atomic_read(&ctx->configuring))
1010 return -EAGAIN;
1011
1012 if (encrypt) {
1013 dir = &ctx->encrypt;
1014 cryptlen = req->cryptlen;
1015 } else {
1016 dir = &ctx->decrypt;
1017 /* req->cryptlen includes the authsize when decrypting */
1018 cryptlen = req->cryptlen -authsize;
1019 eff_cryptlen -= authsize;
1020 }
1021 crypt = get_crypt_desc();
1022 if (!crypt)
1023 return ret;
1024
1025 crypt->data.aead_req = req;
1026 crypt->crypto_ctx = dir->npe_ctx_phys;
1027 crypt->mode = dir->npe_mode;
1028 crypt->init_len = dir->npe_ctx_idx;
1029
1030 crypt->crypt_offs = cryptoffset;
1031 crypt->crypt_len = eff_cryptlen;
1032
1033 crypt->auth_offs = 0;
1034 crypt->auth_len = req->assoclen + ivsize + cryptlen;
1035 BUG_ON(ivsize && !req->iv);
1036 memcpy(crypt->iv, req->iv, ivsize);
1037
1038 if (req->src != req->dst) {
1039 BUG(); /* -ENOTSUP because of my laziness */
1040 }
1041
1042 req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
1043 if (!req_ctx->buffer)
1044 goto out;
1045 req_ctx->buffer->phys_addr = 0;
1046 /* ASSOC data */
1047 nents = count_sg(req->assoc, req->assoclen);
1048 req_ctx->assoc_nents = nents;
1049 dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
1050 buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
1051 if (!buf)
1052 goto unmap_sg_assoc;
1053 /* IV */
1054 sg_init_table(&req_ctx->ivlist, 1);
1055 sg_set_buf(&req_ctx->ivlist, iv, ivsize);
1056 dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
1057 buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
1058 if (!buf)
1059 goto unmap_sg_iv;
1060 if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
1061 /* The 12 hmac bytes are scattered across sg entries,
1062 * so copy them into a contiguous safe buffer */
1063 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
1064 &crypt->icv_rev_aes);
1065 if (unlikely(!req_ctx->hmac_virt))
1066 goto unmap_sg_iv;
1067 if (!encrypt) {
1068 scatterwalk_map_and_copy(req_ctx->hmac_virt,
1069 req->src, cryptlen, authsize, 0);
1070 }
1071 req_ctx->encrypt = encrypt;
1072 } else {
1073 req_ctx->hmac_virt = NULL;
1074 }
1075 /* Crypt */
1076 nents = count_sg(req->src, cryptlen + authsize);
1077 req_ctx->src_nents = nents;
1078 dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
1079 buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
1080 if (!buf)
1081 goto unmap_sg_src;
1082 if (!req_ctx->hmac_virt) {
1083 crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
1084 }
1085 crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1086 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
1087 BUG_ON(qmgr_stat_overflow(SEND_QID));
1088 return -EINPROGRESS;
1089unmap_sg_src:
1090 dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
1091 if (req_ctx->hmac_virt) {
1092 dma_pool_free(buffer_pool, req_ctx->hmac_virt,
1093 crypt->icv_rev_aes);
1094 }
1095unmap_sg_iv:
1096 dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
1097unmap_sg_assoc:
1098 dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
1099 free_buf_chain(req_ctx->buffer, crypt->src_buf);
1100out:
1101 crypt->ctl_flags = CTL_FLAG_UNUSED;
1102 return ret;
1103}
1104
1105static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1106{
1107 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1108 u32 *flags = &tfm->base.crt_flags;
1109 unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
1110 int ret;
1111
1112 if (!ctx->enckey_len && !ctx->authkey_len)
1113 return 0;
1114 init_completion(&ctx->completion);
1115 atomic_inc(&ctx->configuring);
1116
1117 reset_sa_dir(&ctx->encrypt);
1118 reset_sa_dir(&ctx->decrypt);
1119
1120 ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1121 if (ret)
1122 goto out;
1123 ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1124 if (ret)
1125 goto out;
1126 ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1127 ctx->authkey_len, digest_len);
1128 if (ret)
1129 goto out;
1130 ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
1131 ctx->authkey_len, digest_len);
1132 if (ret)
1133 goto out;
1134
1135 if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
1136 if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
1137 ret = -EINVAL;
1138 goto out;
1139 } else {
1140 *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
1141 }
1142 }
1143out:
1144 if (!atomic_dec_and_test(&ctx->configuring))
1145 wait_for_completion(&ctx->completion);
1146 return ret;
1147}
1148
1149static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1150{
1151 int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
1152
1153 if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
1154 return -EINVAL;
1155 return aead_setup(tfm, authsize);
1156}
1157
1158static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1159 unsigned int keylen)
1160{
1161 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1162 struct rtattr *rta = (struct rtattr *)key;
1163 struct crypto_authenc_key_param *param;
1164
1165 if (!RTA_OK(rta, keylen))
1166 goto badkey;
1167 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1168 goto badkey;
1169 if (RTA_PAYLOAD(rta) < sizeof(*param))
1170 goto badkey;
1171
1172 param = RTA_DATA(rta);
1173 ctx->enckey_len = be32_to_cpu(param->enckeylen);
1174
1175 key += RTA_ALIGN(rta->rta_len);
1176 keylen -= RTA_ALIGN(rta->rta_len);
1177
1178 if (keylen < ctx->enckey_len)
1179 goto badkey;
1180
1181 ctx->authkey_len = keylen - ctx->enckey_len;
1182 memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
1183 memcpy(ctx->authkey, key, ctx->authkey_len);
1184
1185 return aead_setup(tfm, crypto_aead_authsize(tfm));
1186badkey:
1187 ctx->enckey_len = 0;
1188 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1189 return -EINVAL;
1190}
1191
1192static int aead_encrypt(struct aead_request *req)
1193{
1194 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1195 return aead_perform(req, 1, req->assoclen + ivsize,
1196 req->cryptlen, req->iv);
1197}
1198
1199static int aead_decrypt(struct aead_request *req)
1200{
1201 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1202 return aead_perform(req, 0, req->assoclen + ivsize,
1203 req->cryptlen, req->iv);
1204}
1205
1206static int aead_givencrypt(struct aead_givcrypt_request *req)
1207{
1208 struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
1209 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1210 unsigned len, ivsize = crypto_aead_ivsize(tfm);
1211 __be64 seq;
1212
1213 /* copied from eseqiv.c */
1214 if (!ctx->salted) {
1215 get_random_bytes(ctx->salt, ivsize);
1216 ctx->salted = 1;
1217 }
1218 memcpy(req->areq.iv, ctx->salt, ivsize);
1219 len = ivsize;
1220 if (ivsize > sizeof(u64)) {
1221 memset(req->giv, 0, ivsize - sizeof(u64));
1222 len = sizeof(u64);
1223 }
1224 seq = cpu_to_be64(req->seq);
1225 memcpy(req->giv + ivsize - len, &seq, len);
1226 return aead_perform(&req->areq, 1, req->areq.assoclen,
1227 req->areq.cryptlen +ivsize, req->giv);
1228}
1229
1230static struct ixp_alg ixp4xx_algos[] = {
1231{
1232 .crypto = {
1233 .cra_name = "cbc(des)",
1234 .cra_blocksize = DES_BLOCK_SIZE,
1235 .cra_u = { .ablkcipher = {
1236 .min_keysize = DES_KEY_SIZE,
1237 .max_keysize = DES_KEY_SIZE,
1238 .ivsize = DES_BLOCK_SIZE,
1239 .geniv = "eseqiv",
1240 }
1241 }
1242 },
1243 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1244 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1245
1246}, {
1247 .crypto = {
1248 .cra_name = "ecb(des)",
1249 .cra_blocksize = DES_BLOCK_SIZE,
1250 .cra_u = { .ablkcipher = {
1251 .min_keysize = DES_KEY_SIZE,
1252 .max_keysize = DES_KEY_SIZE,
1253 }
1254 }
1255 },
1256 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1257 .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1258}, {
1259 .crypto = {
1260 .cra_name = "cbc(des3_ede)",
1261 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1262 .cra_u = { .ablkcipher = {
1263 .min_keysize = DES3_EDE_KEY_SIZE,
1264 .max_keysize = DES3_EDE_KEY_SIZE,
1265 .ivsize = DES3_EDE_BLOCK_SIZE,
1266 .geniv = "eseqiv",
1267 }
1268 }
1269 },
1270 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1271 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1272}, {
1273 .crypto = {
1274 .cra_name = "ecb(des3_ede)",
1275 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1276 .cra_u = { .ablkcipher = {
1277 .min_keysize = DES3_EDE_KEY_SIZE,
1278 .max_keysize = DES3_EDE_KEY_SIZE,
1279 }
1280 }
1281 },
1282 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1283 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1284}, {
1285 .crypto = {
1286 .cra_name = "cbc(aes)",
1287 .cra_blocksize = AES_BLOCK_SIZE,
1288 .cra_u = { .ablkcipher = {
1289 .min_keysize = AES_MIN_KEY_SIZE,
1290 .max_keysize = AES_MAX_KEY_SIZE,
1291 .ivsize = AES_BLOCK_SIZE,
1292 .geniv = "eseqiv",
1293 }
1294 }
1295 },
1296 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1297 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1298}, {
1299 .crypto = {
1300 .cra_name = "ecb(aes)",
1301 .cra_blocksize = AES_BLOCK_SIZE,
1302 .cra_u = { .ablkcipher = {
1303 .min_keysize = AES_MIN_KEY_SIZE,
1304 .max_keysize = AES_MAX_KEY_SIZE,
1305 }
1306 }
1307 },
1308 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1309 .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1310}, {
1311 .crypto = {
1312 .cra_name = "ctr(aes)",
1313 .cra_blocksize = AES_BLOCK_SIZE,
1314 .cra_u = { .ablkcipher = {
1315 .min_keysize = AES_MIN_KEY_SIZE,
1316 .max_keysize = AES_MAX_KEY_SIZE,
1317 .ivsize = AES_BLOCK_SIZE,
1318 .geniv = "eseqiv",
1319 }
1320 }
1321 },
1322 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1323 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1324}, {
1325 .crypto = {
1326 .cra_name = "rfc3686(ctr(aes))",
1327 .cra_blocksize = AES_BLOCK_SIZE,
1328 .cra_u = { .ablkcipher = {
1329 .min_keysize = AES_MIN_KEY_SIZE,
1330 .max_keysize = AES_MAX_KEY_SIZE,
1331 .ivsize = AES_BLOCK_SIZE,
1332 .geniv = "eseqiv",
1333 .setkey = ablk_rfc3686_setkey,
1334 .encrypt = ablk_rfc3686_crypt,
1335 .decrypt = ablk_rfc3686_crypt }
1336 }
1337 },
1338 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1339 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1340}, {
1341 .crypto = {
1342 .cra_name = "authenc(hmac(md5),cbc(des))",
1343 .cra_blocksize = DES_BLOCK_SIZE,
1344 .cra_u = { .aead = {
1345 .ivsize = DES_BLOCK_SIZE,
1346 .maxauthsize = MD5_DIGEST_SIZE,
1347 }
1348 }
1349 },
1350 .hash = &hash_alg_md5,
1351 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1352 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1353}, {
1354 .crypto = {
1355 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1356 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1357 .cra_u = { .aead = {
1358 .ivsize = DES3_EDE_BLOCK_SIZE,
1359 .maxauthsize = MD5_DIGEST_SIZE,
1360 }
1361 }
1362 },
1363 .hash = &hash_alg_md5,
1364 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1365 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1366}, {
1367 .crypto = {
1368 .cra_name = "authenc(hmac(sha1),cbc(des))",
1369 .cra_blocksize = DES_BLOCK_SIZE,
1370 .cra_u = { .aead = {
1371 .ivsize = DES_BLOCK_SIZE,
1372 .maxauthsize = SHA1_DIGEST_SIZE,
1373 }
1374 }
1375 },
1376 .hash = &hash_alg_sha1,
1377 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1378 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1379}, {
1380 .crypto = {
1381 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1382 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1383 .cra_u = { .aead = {
1384 .ivsize = DES3_EDE_BLOCK_SIZE,
1385 .maxauthsize = SHA1_DIGEST_SIZE,
1386 }
1387 }
1388 },
1389 .hash = &hash_alg_sha1,
1390 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1391 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1392}, {
1393 .crypto = {
1394 .cra_name = "authenc(hmac(md5),cbc(aes))",
1395 .cra_blocksize = AES_BLOCK_SIZE,
1396 .cra_u = { .aead = {
1397 .ivsize = AES_BLOCK_SIZE,
1398 .maxauthsize = MD5_DIGEST_SIZE,
1399 }
1400 }
1401 },
1402 .hash = &hash_alg_md5,
1403 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1404 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1405}, {
1406 .crypto = {
1407 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1408 .cra_blocksize = AES_BLOCK_SIZE,
1409 .cra_u = { .aead = {
1410 .ivsize = AES_BLOCK_SIZE,
1411 .maxauthsize = SHA1_DIGEST_SIZE,
1412 }
1413 }
1414 },
1415 .hash = &hash_alg_sha1,
1416 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1417 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1418} };
1419
1420#define IXP_POSTFIX "-ixp4xx"
1421static int __init ixp_module_init(void)
1422{
1423 int num = ARRAY_SIZE(ixp4xx_algos);
1424 int i,err ;
1425
1426 if (platform_device_register(&pseudo_dev))
1427 return -ENODEV;
1428
1429 spin_lock_init(&desc_lock);
1430 spin_lock_init(&emerg_lock);
1431
1432 err = init_ixp_crypto();
1433 if (err) {
1434 platform_device_unregister(&pseudo_dev);
1435 return err;
1436 }
1437 for (i=0; i< num; i++) {
1438 struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
1439
1440 if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
1441 "%s"IXP_POSTFIX, cra->cra_name) >=
1442 CRYPTO_MAX_ALG_NAME)
1443 {
1444 continue;
1445 }
1446 if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
1447 continue;
1448 }
1449 if (!ixp4xx_algos[i].hash) {
1450 /* block ciphers */
1451 cra->cra_type = &crypto_ablkcipher_type;
1452 cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1453 CRYPTO_ALG_ASYNC;
1454 if (!cra->cra_ablkcipher.setkey)
1455 cra->cra_ablkcipher.setkey = ablk_setkey;
1456 if (!cra->cra_ablkcipher.encrypt)
1457 cra->cra_ablkcipher.encrypt = ablk_encrypt;
1458 if (!cra->cra_ablkcipher.decrypt)
1459 cra->cra_ablkcipher.decrypt = ablk_decrypt;
1460 cra->cra_init = init_tfm_ablk;
1461 } else {
1462 /* authenc */
1463 cra->cra_type = &crypto_aead_type;
1464 cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
1465 CRYPTO_ALG_ASYNC;
1466 cra->cra_aead.setkey = aead_setkey;
1467 cra->cra_aead.setauthsize = aead_setauthsize;
1468 cra->cra_aead.encrypt = aead_encrypt;
1469 cra->cra_aead.decrypt = aead_decrypt;
1470 cra->cra_aead.givencrypt = aead_givencrypt;
1471 cra->cra_init = init_tfm_aead;
1472 }
1473 cra->cra_ctxsize = sizeof(struct ixp_ctx);
1474 cra->cra_module = THIS_MODULE;
1475 cra->cra_alignmask = 3;
1476 cra->cra_priority = 300;
1477 cra->cra_exit = exit_tfm;
1478 if (crypto_register_alg(cra))
1479 printk(KERN_ERR "Failed to register '%s'\n",
1480 cra->cra_name);
1481 else
1482 ixp4xx_algos[i].registered = 1;
1483 }
1484 return 0;
1485}
1486
1487static void __exit ixp_module_exit(void)
1488{
1489 int num = ARRAY_SIZE(ixp4xx_algos);
1490 int i;
1491
1492 for (i=0; i< num; i++) {
1493 if (ixp4xx_algos[i].registered)
1494 crypto_unregister_alg(&ixp4xx_algos[i].crypto);
1495 }
1496 release_ixp_crypto();
1497 platform_device_unregister(&pseudo_dev);
1498}
1499
1500module_init(ixp_module_init);
1501module_exit(ixp_module_exit);
1502
1503MODULE_LICENSE("GPL");
1504MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1505MODULE_DESCRIPTION("IXP4xx hardware crypto");
1506
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index bb30eb9b93ef..54a2a166e566 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -385,12 +385,12 @@ static int __init padlock_init(void)
 	int ret;
 
 	if (!cpu_has_xcrypt) {
-		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
+		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
 		return -ENODEV;
 	}
 
 	if (!cpu_has_xcrypt_enabled) {
-		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index c666b4e0933e..40d5680fa013 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -254,12 +254,12 @@ static int __init padlock_init(void)
 	int rc = -ENODEV;
 
 	if (!cpu_has_phe) {
-		printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
+		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
 		return -ENODEV;
 	}
 
 	if (!cpu_has_phe_enabled) {
-		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
new file mode 100644
index 000000000000..b11943dadefd
--- /dev/null
+++ b/drivers/crypto/talitos.c
@@ -0,0 +1,1597 @@
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
35#include <linux/of_platform.h>
36#include <linux/dma-mapping.h>
37#include <linux/io.h>
38#include <linux/spinlock.h>
39#include <linux/rtnetlink.h>
40
41#include <crypto/algapi.h>
42#include <crypto/aes.h>
43#include <crypto/des.h>
44#include <crypto/sha.h>
45#include <crypto/aead.h>
46#include <crypto/authenc.h>
47
48#include "talitos.h"
49
50#define TALITOS_TIMEOUT 100000
51#define TALITOS_MAX_DATA_LEN 65535
52
53#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
54#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
55#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
56
57/* descriptor pointer entry */
58struct talitos_ptr {
59 __be16 len; /* length */
60 u8 j_extent; /* jump to sg link table and/or extent */
61 u8 eptr; /* extended address */
62 __be32 ptr; /* address */
63};
64
65/* descriptor */
66struct talitos_desc {
67 __be32 hdr; /* header high bits */
68 __be32 hdr_lo; /* header low bits */
69 struct talitos_ptr ptr[7]; /* ptr/len pair array */
70};
71
72/**
73 * talitos_request - descriptor submission request
74 * @desc: descriptor pointer (kernel virtual)
75 * @dma_desc: descriptor's physical bus address
76 * @callback: whom to call when descriptor processing is done
77 * @context: caller context (optional)
78 */
79struct talitos_request {
80 struct talitos_desc *desc;
81 dma_addr_t dma_desc;
82 void (*callback) (struct device *dev, struct talitos_desc *desc,
83 void *context, int error);
84 void *context;
85};
86
87struct talitos_private {
88 struct device *dev;
89 struct of_device *ofdev;
90 void __iomem *reg;
91 int irq;
92
93 /* SEC version geometry (from device tree node) */
94 unsigned int num_channels;
95 unsigned int chfifo_len;
96 unsigned int exec_units;
97 unsigned int desc_types;
98
99 /* next channel to be assigned next incoming descriptor */
100 atomic_t last_chan;
101
102 /* per-channel request fifo */
103 struct talitos_request **fifo;
104
105 /*
106 * length of the request fifo
107 * fifo_len is chfifo_len rounded up to next power of 2
108 * so we can use bitwise ops to wrap
109 */
110 unsigned int fifo_len;
111
112 /* per-channel index to next free descriptor request */
113 int *head;
114
115 /* per-channel index to next in-progress/done descriptor request */
116 int *tail;
117
118 /* per-channel request submission (head) and release (tail) locks */
119 spinlock_t *head_lock;
120 spinlock_t *tail_lock;
121
122 /* request callback tasklet */
123 struct tasklet_struct done_task;
124 struct tasklet_struct error_task;
125
126 /* list of registered algorithms */
127 struct list_head alg_list;
128
129 /* hwrng device */
130 struct hwrng rng;
131};
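/*
 * Illustrative sketch (not part of this patch) of the fifo_len convention
 * documented in the struct above: rounding chfifo_len up to a power of two
 * lets the per-channel head/tail indices wrap with a cheap mask instead of
 * a modulo, roughly:
 *
 *	unsigned int fifo_len = roundup_pow_of_two(chfifo_len);
 *	head = (head + 1) & (fifo_len - 1);
 *
 * talitos_submit() and flush_channel() below use exactly this masking idiom.
 */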
132
133/*
134 * map virtual single (contiguous) pointer to h/w descriptor pointer
135 */
136static void map_single_talitos_ptr(struct device *dev,
137 struct talitos_ptr *talitos_ptr,
138 unsigned short len, void *data,
139 unsigned char extent,
140 enum dma_data_direction dir)
141{
142 talitos_ptr->len = cpu_to_be16(len);
143 talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
144 talitos_ptr->j_extent = extent;
145}
146
147/*
148 * unmap bus single (contiguous) h/w descriptor pointer
149 */
150static void unmap_single_talitos_ptr(struct device *dev,
151 struct talitos_ptr *talitos_ptr,
152 enum dma_data_direction dir)
153{
154 dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
155 be16_to_cpu(talitos_ptr->len), dir);
156}
157
158static int reset_channel(struct device *dev, int ch)
159{
160 struct talitos_private *priv = dev_get_drvdata(dev);
161 unsigned int timeout = TALITOS_TIMEOUT;
162
163 setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
164
165 while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
166 && --timeout)
167 cpu_relax();
168
169 if (timeout == 0) {
170 dev_err(dev, "failed to reset channel %d\n", ch);
171 return -EIO;
172 }
173
174 /* set done writeback and IRQ */
175 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
176 TALITOS_CCCR_LO_CDIE);
177
178 return 0;
179}
180
181static int reset_device(struct device *dev)
182{
183 struct talitos_private *priv = dev_get_drvdata(dev);
184 unsigned int timeout = TALITOS_TIMEOUT;
185
186 setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
187
188 while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
189 && --timeout)
190 cpu_relax();
191
192 if (timeout == 0) {
193 dev_err(dev, "failed to reset device\n");
194 return -EIO;
195 }
196
197 return 0;
198}
199
200/*
201 * Reset and initialize the device
202 */
203static int init_device(struct device *dev)
204{
205 struct talitos_private *priv = dev_get_drvdata(dev);
206 int ch, err;
207
208 /*
209 * Master reset
210 * errata documentation: warning: certain SEC interrupts
211 * are not fully cleared by writing the MCR:SWR bit,
212 * set bit twice to completely reset
213 */
214 err = reset_device(dev);
215 if (err)
216 return err;
217
218 err = reset_device(dev);
219 if (err)
220 return err;
221
222 /* reset channels */
223 for (ch = 0; ch < priv->num_channels; ch++) {
224 err = reset_channel(dev, ch);
225 if (err)
226 return err;
227 }
228
229 /* enable channel done and error interrupts */
230 setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
231 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
232
233 return 0;
234}
235
236/**
237 * talitos_submit - submits a descriptor to the device for processing
238 * @dev: the SEC device to be used
239 * @desc: the descriptor to be processed by the device
240 * @callback: whom to call when processing is complete
241 * @context: a handle for use by caller (optional)
242 *
243 * desc must contain valid dma-mapped (bus physical) address pointers.
244 * callback must check err and feedback in descriptor header
245 * for device processing status.
246 */
247static int talitos_submit(struct device *dev, struct talitos_desc *desc,
248 void (*callback)(struct device *dev,
249 struct talitos_desc *desc,
250 void *context, int error),
251 void *context)
252{
253 struct talitos_private *priv = dev_get_drvdata(dev);
254 struct talitos_request *request;
255 unsigned long flags, ch;
256 int head;
257
258 /* select done notification */
259 desc->hdr |= DESC_HDR_DONE_NOTIFY;
260
261 /* emulate SEC's round-robin channel fifo polling scheme */
262 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
263
264 spin_lock_irqsave(&priv->head_lock[ch], flags);
265
266 head = priv->head[ch];
267 request = &priv->fifo[ch][head];
268
269 if (request->desc) {
270 /* request queue is full */
271 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
272 return -EAGAIN;
273 }
274
275 /* map descriptor and save caller data */
276 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
277 DMA_BIDIRECTIONAL);
278 request->callback = callback;
279 request->context = context;
280
281 /* increment fifo head */
282 priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
283
284 smp_wmb();
285 request->desc = desc;
286
287 /* GO! */
288 wmb();
289 out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
290
291 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
292
293 return -EINPROGRESS;
294}
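/*
 * Illustrative caller sketch (not part of this patch): a user of
 * talitos_submit() builds a dma-mapped talitos_desc, submits it, and treats
 * -EINPROGRESS as success, retrying later on -EAGAIN (channel fifo full).
 * The names my_done/my_context/handle_busy_or_error below are made up.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// inspect error and the DESC_HDR_DONE feedback in desc->hdr
 *	}
 *
 *	int ret = talitos_submit(dev, desc, my_done, my_context);
 *	if (ret != -EINPROGRESS)
 *		handle_busy_or_error(ret);	// e.g. -EAGAIN: queue full
 *	// my_done() runs later from the done/error tasklets
 */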
295
296/*
297 * reap finished descriptors and run their callbacks; pass the error to any that did not complete
298 */
299static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
300{
301 struct talitos_private *priv = dev_get_drvdata(dev);
302 struct talitos_request *request, saved_req;
303 unsigned long flags;
304 int tail, status;
305
306 spin_lock_irqsave(&priv->tail_lock[ch], flags);
307
308 tail = priv->tail[ch];
309 while (priv->fifo[ch][tail].desc) {
310 request = &priv->fifo[ch][tail];
311
312 /* descriptors with their done bits set don't get the error */
313 rmb();
314 if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
315 status = 0;
316 else
317 if (!error)
318 break;
319 else
320 status = error;
321
322 dma_unmap_single(dev, request->dma_desc,
323 sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
324
325 /* copy entries so we can call callback outside lock */
326 saved_req.desc = request->desc;
327 saved_req.callback = request->callback;
328 saved_req.context = request->context;
329
330 /* release request entry in fifo */
331 smp_wmb();
332 request->desc = NULL;
333
334 /* increment fifo tail */
335 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
336
337 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
338 saved_req.callback(dev, saved_req.desc, saved_req.context,
339 status);
340 /* channel may resume processing in single desc error case */
341 if (error && !reset_ch && status == error)
342 return;
343 spin_lock_irqsave(&priv->tail_lock[ch], flags);
344 tail = priv->tail[ch];
345 }
346
347 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
348}
349
350/*
351 * process completed requests for channels that have done status
352 */
353static void talitos_done(unsigned long data)
354{
355 struct device *dev = (struct device *)data;
356 struct talitos_private *priv = dev_get_drvdata(dev);
357 int ch;
358
359 for (ch = 0; ch < priv->num_channels; ch++)
360 flush_channel(dev, ch, 0, 0);
361}
362
363/*
364 * locate current (offending) descriptor
365 */
366static struct talitos_desc *current_desc(struct device *dev, int ch)
367{
368 struct talitos_private *priv = dev_get_drvdata(dev);
369 int tail = priv->tail[ch];
370 dma_addr_t cur_desc;
371
372 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
373
374 while (priv->fifo[ch][tail].dma_desc != cur_desc) {
375 tail = (tail + 1) & (priv->fifo_len - 1);
376 if (tail == priv->tail[ch]) {
377 dev_err(dev, "couldn't locate current descriptor\n");
378 return NULL;
379 }
380 }
381
382 return priv->fifo[ch][tail].desc;
383}
384
385/*
386 * user diagnostics; report root cause of error based on execution unit status
387 */
388static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
389{
390 struct talitos_private *priv = dev_get_drvdata(dev);
391 int i;
392
393 switch (desc->hdr & DESC_HDR_SEL0_MASK) {
394 case DESC_HDR_SEL0_AFEU:
395 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
396 in_be32(priv->reg + TALITOS_AFEUISR),
397 in_be32(priv->reg + TALITOS_AFEUISR_LO));
398 break;
399 case DESC_HDR_SEL0_DEU:
400 dev_err(dev, "DEUISR 0x%08x_%08x\n",
401 in_be32(priv->reg + TALITOS_DEUISR),
402 in_be32(priv->reg + TALITOS_DEUISR_LO));
403 break;
404 case DESC_HDR_SEL0_MDEUA:
405 case DESC_HDR_SEL0_MDEUB:
406 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
407 in_be32(priv->reg + TALITOS_MDEUISR),
408 in_be32(priv->reg + TALITOS_MDEUISR_LO));
409 break;
410 case DESC_HDR_SEL0_RNG:
411 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
412 in_be32(priv->reg + TALITOS_RNGUISR),
413 in_be32(priv->reg + TALITOS_RNGUISR_LO));
414 break;
415 case DESC_HDR_SEL0_PKEU:
416 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
417 in_be32(priv->reg + TALITOS_PKEUISR),
418 in_be32(priv->reg + TALITOS_PKEUISR_LO));
419 break;
420 case DESC_HDR_SEL0_AESU:
421 dev_err(dev, "AESUISR 0x%08x_%08x\n",
422 in_be32(priv->reg + TALITOS_AESUISR),
423 in_be32(priv->reg + TALITOS_AESUISR_LO));
424 break;
425 case DESC_HDR_SEL0_CRCU:
426 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
427 in_be32(priv->reg + TALITOS_CRCUISR),
428 in_be32(priv->reg + TALITOS_CRCUISR_LO));
429 break;
430 case DESC_HDR_SEL0_KEU:
431 dev_err(dev, "KEUISR 0x%08x_%08x\n",
432 in_be32(priv->reg + TALITOS_KEUISR),
433 in_be32(priv->reg + TALITOS_KEUISR_LO));
434 break;
435 }
436
437 switch (desc->hdr & DESC_HDR_SEL1_MASK) {
438 case DESC_HDR_SEL1_MDEUA:
439 case DESC_HDR_SEL1_MDEUB:
440 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
441 in_be32(priv->reg + TALITOS_MDEUISR),
442 in_be32(priv->reg + TALITOS_MDEUISR_LO));
443 break;
444 case DESC_HDR_SEL1_CRCU:
445 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
446 in_be32(priv->reg + TALITOS_CRCUISR),
447 in_be32(priv->reg + TALITOS_CRCUISR_LO));
448 break;
449 }
450
451 for (i = 0; i < 8; i++)
452 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
453 in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
454 in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
455}
456
457/*
458 * recover from error interrupts
459 */
460static void talitos_error(unsigned long data)
461{
462 struct device *dev = (struct device *)data;
463 struct talitos_private *priv = dev_get_drvdata(dev);
464 unsigned int timeout = TALITOS_TIMEOUT;
465 int ch, error, reset_dev = 0, reset_ch = 0;
466 u32 isr, isr_lo, v, v_lo;
467
468 isr = in_be32(priv->reg + TALITOS_ISR);
469 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
470
471 for (ch = 0; ch < priv->num_channels; ch++) {
472 /* skip channels without errors */
473 if (!(isr & (1 << (ch * 2 + 1))))
474 continue;
475
476 error = -EINVAL;
477
478 v = in_be32(priv->reg + TALITOS_CCPSR(ch));
479 v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
480
481 if (v_lo & TALITOS_CCPSR_LO_DOF) {
482 dev_err(dev, "double fetch fifo overflow error\n");
483 error = -EAGAIN;
484 reset_ch = 1;
485 }
486 if (v_lo & TALITOS_CCPSR_LO_SOF) {
487 /* h/w dropped descriptor */
488 dev_err(dev, "single fetch fifo overflow error\n");
489 error = -EAGAIN;
490 }
491 if (v_lo & TALITOS_CCPSR_LO_MDTE)
492 dev_err(dev, "master data transfer error\n");
493 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
494 dev_err(dev, "s/g data length zero error\n");
495 if (v_lo & TALITOS_CCPSR_LO_FPZ)
496 dev_err(dev, "fetch pointer zero error\n");
497 if (v_lo & TALITOS_CCPSR_LO_IDH)
498 dev_err(dev, "illegal descriptor header error\n");
499 if (v_lo & TALITOS_CCPSR_LO_IEU)
500 dev_err(dev, "invalid execution unit error\n");
501 if (v_lo & TALITOS_CCPSR_LO_EU)
502 report_eu_error(dev, ch, current_desc(dev, ch));
503 if (v_lo & TALITOS_CCPSR_LO_GB)
504 dev_err(dev, "gather boundary error\n");
505 if (v_lo & TALITOS_CCPSR_LO_GRL)
506 dev_err(dev, "gather return/length error\n");
507 if (v_lo & TALITOS_CCPSR_LO_SB)
508 dev_err(dev, "scatter boundary error\n");
509 if (v_lo & TALITOS_CCPSR_LO_SRL)
510 dev_err(dev, "scatter return/length error\n");
511
512 flush_channel(dev, ch, error, reset_ch);
513
514 if (reset_ch) {
515 reset_channel(dev, ch);
516 } else {
517 setbits32(priv->reg + TALITOS_CCCR(ch),
518 TALITOS_CCCR_CONT);
519 setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
520 while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
521 TALITOS_CCCR_CONT) && --timeout)
522 cpu_relax();
523 if (timeout == 0) {
524 dev_err(dev, "failed to restart channel %d\n",
525 ch);
526 reset_dev = 1;
527 }
528 }
529 }
530 if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
531 dev_err(dev, "done overflow, internal time out, or rngu error: "
532 "ISR 0x%08x_%08x\n", isr, isr_lo);
533
534 /* purge request queues */
535 for (ch = 0; ch < priv->num_channels; ch++)
536 flush_channel(dev, ch, -EIO, 1);
537
538 /* reset and reinitialize the device */
539 init_device(dev);
540 }
541}
542
543static irqreturn_t talitos_interrupt(int irq, void *data)
544{
545 struct device *dev = data;
546 struct talitos_private *priv = dev_get_drvdata(dev);
547 u32 isr, isr_lo;
548
549 isr = in_be32(priv->reg + TALITOS_ISR);
550 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
551
552 /* ack */
553 out_be32(priv->reg + TALITOS_ICR, isr);
554 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
555
556 if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
557 talitos_error((unsigned long)data);
558 else
559 if (likely(isr & TALITOS_ISR_CHDONE))
560 tasklet_schedule(&priv->done_task);
561
562 return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
563}
564
565/*
566 * hwrng
567 */
568static int talitos_rng_data_present(struct hwrng *rng, int wait)
569{
570 struct device *dev = (struct device *)rng->priv;
571 struct talitos_private *priv = dev_get_drvdata(dev);
572 u32 ofl;
573 int i;
574
575 for (i = 0; i < 20; i++) {
576 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
577 TALITOS_RNGUSR_LO_OFL;
578 if (ofl || !wait)
579 break;
580 udelay(10);
581 }
582
583 return !!ofl;
584}
585
586static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
587{
588 struct device *dev = (struct device *)rng->priv;
589 struct talitos_private *priv = dev_get_drvdata(dev);
590
591 /* rng fifo requires 64-bit accesses */
592 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
593 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
594
595 return sizeof(u32);
596}
597
598static int talitos_rng_init(struct hwrng *rng)
599{
600 struct device *dev = (struct device *)rng->priv;
601 struct talitos_private *priv = dev_get_drvdata(dev);
602 unsigned int timeout = TALITOS_TIMEOUT;
603
604 setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
605 while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
606 && --timeout)
607 cpu_relax();
608 if (timeout == 0) {
609 dev_err(dev, "failed to reset rng hw\n");
610 return -ENODEV;
611 }
612
613 /* start generating */
614 setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
615
616 return 0;
617}
618
619static int talitos_register_rng(struct device *dev)
620{
621 struct talitos_private *priv = dev_get_drvdata(dev);
622
623 priv->rng.name = dev_driver_string(dev),
624 priv->rng.init = talitos_rng_init,
625 priv->rng.data_present = talitos_rng_data_present,
626 priv->rng.data_read = talitos_rng_data_read,
627 priv->rng.priv = (unsigned long)dev;
628
629 return hwrng_register(&priv->rng);
630}
631
632static void talitos_unregister_rng(struct device *dev)
633{
634 struct talitos_private *priv = dev_get_drvdata(dev);
635
636 hwrng_unregister(&priv->rng);
637}
638
639/*
640 * crypto alg
641 */
642#define TALITOS_CRA_PRIORITY 3000
643#define TALITOS_MAX_KEY_SIZE 64
644#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
645
646#define MD5_DIGEST_SIZE 16
647
648struct talitos_ctx {
649 struct device *dev;
650 __be32 desc_hdr_template;
651 u8 key[TALITOS_MAX_KEY_SIZE];
652 u8 iv[TALITOS_MAX_IV_LENGTH];
653 unsigned int keylen;
654 unsigned int enckeylen;
655 unsigned int authkeylen;
656 unsigned int authsize;
657};
658
659static int aead_authenc_setauthsize(struct crypto_aead *authenc,
660 unsigned int authsize)
661{
662 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
663
664 ctx->authsize = authsize;
665
666 return 0;
667}
668
669static int aead_authenc_setkey(struct crypto_aead *authenc,
670 const u8 *key, unsigned int keylen)
671{
672 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
673 struct rtattr *rta = (void *)key;
674 struct crypto_authenc_key_param *param;
675 unsigned int authkeylen;
676 unsigned int enckeylen;
677
678 if (!RTA_OK(rta, keylen))
679 goto badkey;
680
681 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
682 goto badkey;
683
684 if (RTA_PAYLOAD(rta) < sizeof(*param))
685 goto badkey;
686
687 param = RTA_DATA(rta);
688 enckeylen = be32_to_cpu(param->enckeylen);
689
690 key += RTA_ALIGN(rta->rta_len);
691 keylen -= RTA_ALIGN(rta->rta_len);
692
693 if (keylen < enckeylen)
694 goto badkey;
695
696 authkeylen = keylen - enckeylen;
697
698 if (keylen > TALITOS_MAX_KEY_SIZE)
699 goto badkey;
700
701 memcpy(&ctx->key, key, keylen);
702
703 ctx->keylen = keylen;
704 ctx->enckeylen = enckeylen;
705 ctx->authkeylen = authkeylen;
706
707 return 0;
708
709badkey:
710 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
711 return -EINVAL;
712}
713
714/*
715 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
716 * @src_nents: number of segments in input scatterlist
717 * @dst_nents: number of segments in output scatterlist
718 * @dma_len: length of dma mapped link_tbl space
719 * @dma_link_tbl: bus physical address of link_tbl
720 * @desc: h/w descriptor
721 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
722 *
723 * if decrypting (with authcheck), or either one of src_nents or dst_nents
724 * is greater than 1, an integrity check value is concatenated to the end
725 * of link_tbl data
726 */
727struct ipsec_esp_edesc {
728 int src_nents;
729 int dst_nents;
730 int dma_len;
731 dma_addr_t dma_link_tbl;
732 struct talitos_desc desc;
733 struct talitos_ptr link_tbl[0];
734};
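/*
 * Illustrative sketch (not part of this patch): link_tbl[0] is a flexible
 * array member, so an edesc would typically be allocated together with its
 * h/w link table in one block, roughly:
 *
 *	edesc = kmalloc(sizeof(*edesc) +
 *			n_link_tbl_entries * sizeof(struct talitos_ptr),
 *			GFP_ATOMIC);
 *
 * where n_link_tbl_entries depends on src_nents/dst_nents and, per the
 * comment above, on whether an ICV is appended; the exact sizing helper is
 * not shown in this hunk.
 */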
735
736static void ipsec_esp_unmap(struct device *dev,
737 struct ipsec_esp_edesc *edesc,
738 struct aead_request *areq)
739{
740 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
741 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
742 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
743 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
744
745 dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
746
747 if (areq->src != areq->dst) {
748 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
749 DMA_TO_DEVICE);
750 dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
751 DMA_FROM_DEVICE);
752 } else {
753 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
754 DMA_BIDIRECTIONAL);
755 }
756
757 if (edesc->dma_len)
758 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
759 DMA_BIDIRECTIONAL);
760}
761
762/*
763 * ipsec_esp descriptor callbacks
764 */
765static void ipsec_esp_encrypt_done(struct device *dev,
766 struct talitos_desc *desc, void *context,
767 int err)
768{
769 struct aead_request *areq = context;
770 struct ipsec_esp_edesc *edesc =
771 container_of(desc, struct ipsec_esp_edesc, desc);
772 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
773 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
774 struct scatterlist *sg;
775 void *icvdata;
776
777 ipsec_esp_unmap(dev, edesc, areq);
778
779 /* copy the generated ICV to dst */
780 if (edesc->dma_len) {
781 icvdata = &edesc->link_tbl[edesc->src_nents +
782 edesc->dst_nents + 1];
783 sg = sg_last(areq->dst, edesc->dst_nents);
784 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
785 icvdata, ctx->authsize);
786 }
787
788 kfree(edesc);
789
790 aead_request_complete(areq, err);
791}
792
793static void ipsec_esp_decrypt_done(struct device *dev,
794 struct talitos_desc *desc, void *context,
795 int err)
796{
797 struct aead_request *req = context;
798 struct ipsec_esp_edesc *edesc =
799 container_of(desc, struct ipsec_esp_edesc, desc);
800 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
801 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
802 struct scatterlist *sg;
803 void *icvdata;
804
805 ipsec_esp_unmap(dev, edesc, req);
806
807 if (!err) {
808 /* auth check */
809 if (edesc->dma_len)
810 icvdata = &edesc->link_tbl[edesc->src_nents +
811 edesc->dst_nents + 1];
812 else
813 icvdata = &edesc->link_tbl[0];
814
815 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
816 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
817 ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
818 }
819
820 kfree(edesc);
821
822 aead_request_complete(req, err);
823}
824
825/*
826 * convert scatterlist to SEC h/w link table format
827 * stop at cryptlen bytes
828 */
829static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
830 int cryptlen, struct talitos_ptr *link_tbl_ptr)
831{
832 int n_sg = sg_count;
833
834 while (n_sg--) {
835 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
836 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
837 link_tbl_ptr->j_extent = 0;
838 link_tbl_ptr++;
839 cryptlen -= sg_dma_len(sg);
840 sg = sg_next(sg);
841 }
842
843 /* adjust (decrease) the last entry's (or last two entries') len to cryptlen */
844 link_tbl_ptr--;
845 while (link_tbl_ptr->len <= (-cryptlen)) {
846 /* Empty this entry, and move to previous one */
847 cryptlen += be16_to_cpu(link_tbl_ptr->len);
848 link_tbl_ptr->len = 0;
849 sg_count--;
850 link_tbl_ptr--;
851 }
852 link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
853 + cryptlen);
854
855 /* tag end of link table */
856 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
857
858 return sg_count;
859}
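/*
 * Worked example (illustrative only): with three DMA-mapped segments of
 * 64 bytes each and cryptlen = 100, the loop above emits three 64-byte
 * entries and finishes with cryptlen = 100 - 192 = -92.  The trim loop then
 * empties the last entry (64 <= 92, cryptlen becomes -28) and shortens the
 * new last entry to 64 + (-28) = 36, so the table describes exactly
 * 64 + 36 = 100 bytes and sg_count drops from 3 to 2.
 */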
860
861/*
862 * fill in and submit ipsec_esp descriptor
863 */
864static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
865 u8 *giv, u64 seq,
866 void (*callback) (struct device *dev,
867 struct talitos_desc *desc,
868 void *context, int error))
869{
870 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
871 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
872 struct device *dev = ctx->dev;
873 struct talitos_desc *desc = &edesc->desc;
874 unsigned int cryptlen = areq->cryptlen;
875 unsigned int authsize = ctx->authsize;
876 unsigned int ivsize;
877 int sg_count;
878
879 /* hmac key */
880 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
881 0, DMA_TO_DEVICE);
882 /* hmac data */
883 map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
884 sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
885 DMA_TO_DEVICE);
886 /* cipher iv */
887 ivsize = crypto_aead_ivsize(aead);
888 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
889 DMA_TO_DEVICE);
890
891 /* cipher key */
892 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
893 (char *)&ctx->key + ctx->authkeylen, 0,
894 DMA_TO_DEVICE);
895
896 /*
897 * cipher in
898 * map and adjust cipher len to aead request cryptlen.
899 * extent is the number of HMAC bytes appended to the ciphertext,
900 * typically 12 for ipsec
901 */
902 desc->ptr[4].len = cpu_to_be16(cryptlen);
903 desc->ptr[4].j_extent = authsize;
904
905 if (areq->src == areq->dst)
906 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
907 DMA_BIDIRECTIONAL);
908 else
909 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
910 DMA_TO_DEVICE);
911
912 if (sg_count == 1) {
913 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
914 } else {
915 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
916 &edesc->link_tbl[0]);
917 if (sg_count > 1) {
918 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
919 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
920 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
921 edesc->dma_len, DMA_BIDIRECTIONAL);
922 } else {
923 /* Only one segment now, so no link tbl needed */
924 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
925 }
926 }
927
928 /* cipher out */
929 desc->ptr[5].len = cpu_to_be16(cryptlen);
930 desc->ptr[5].j_extent = authsize;
931
932 if (areq->src != areq->dst) {
933 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
934 DMA_FROM_DEVICE);
935 }
936
937 if (sg_count == 1) {
938 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
939 } else {
940 struct talitos_ptr *link_tbl_ptr =
941 &edesc->link_tbl[edesc->src_nents];
942 struct scatterlist *sg;
943
944 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
945 edesc->dma_link_tbl +
946 edesc->src_nents);
947 if (areq->src == areq->dst) {
948 memcpy(link_tbl_ptr, &edesc->link_tbl[0],
949 edesc->src_nents * sizeof(struct talitos_ptr));
950 } else {
951 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
952 link_tbl_ptr);
953 }
954 link_tbl_ptr += sg_count - 1;
955
956 /* handle case where sg_last contains the ICV exclusively */
957 sg = sg_last(areq->dst, edesc->dst_nents);
958 if (sg->length == ctx->authsize)
959 link_tbl_ptr--;
960
961 link_tbl_ptr->j_extent = 0;
962 link_tbl_ptr++;
963 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
964 link_tbl_ptr->len = cpu_to_be16(authsize);
965
966 /* icv data follows link tables */
967 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
968 edesc->dma_link_tbl +
969 edesc->src_nents +
970 edesc->dst_nents + 1);
971
972 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
973 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
974 edesc->dma_len, DMA_BIDIRECTIONAL);
975 }
976
977 /* iv out */
978 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
979 DMA_FROM_DEVICE);
980
981 return talitos_submit(dev, desc, callback, areq);
982}
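/*
 * Recap of the descriptor pointers filled in above (summarising the
 * in-line comments of this function):
 *   ptr[0] hmac key         ptr[4] cipher in  (data or link table)
 *   ptr[1] hmac/assoc data  ptr[5] cipher out (data or link table)
 *   ptr[2] cipher iv        ptr[6] iv out
 *   ptr[3] cipher key
 */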
983
984
985/*
986 * derive number of elements in scatterlist
987 */
988static int sg_count(struct scatterlist *sg_list, int nbytes)
989{
990 struct scatterlist *sg = sg_list;
991 int sg_nents = 0;
992
993 while (nbytes) {
994 sg_nents++;
995 nbytes -= sg->length;
996 sg = sg_next(sg);
997 }
998
999 return sg_nents;
1000}
1001
1002/*
1003 * allocate and map the ipsec_esp extended descriptor
1004 */
1005static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1006 int icv_stashing)
1007{
1008 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1009 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1010 struct ipsec_esp_edesc *edesc;
1011 int src_nents, dst_nents, alloc_len, dma_len;
1012
1013 if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
1014 dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
1015 return ERR_PTR(-EINVAL);
1016 }
1017
1018 src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
1019 src_nents = (src_nents == 1) ? 0 : src_nents;
1020
1021 if (areq->dst == areq->src) {
1022 dst_nents = src_nents;
1023 } else {
1024 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
1025 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1026 }
1027
1028 /*
1029 * allocate space for base edesc plus the link tables,
1030 * allowing for a separate entry for the generated ICV (+ 1),
1031 * and the ICV data itself
1032 */
1033 alloc_len = sizeof(struct ipsec_esp_edesc);
1034 if (src_nents || dst_nents) {
1035 dma_len = (src_nents + dst_nents + 1) *
1036 sizeof(struct talitos_ptr) + ctx->authsize;
1037 alloc_len += dma_len;
1038 } else {
1039 dma_len = 0;
1040 alloc_len += icv_stashing ? ctx->authsize : 0;
1041 }
1042
1043 edesc = kmalloc(alloc_len, GFP_DMA);
1044 if (!edesc) {
1045 dev_err(ctx->dev, "could not allocate edescriptor\n");
1046 return ERR_PTR(-ENOMEM);
1047 }
1048
1049 edesc->src_nents = src_nents;
1050 edesc->dst_nents = dst_nents;
1051 edesc->dma_len = dma_len;
1052 edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
1053 edesc->dma_len, DMA_BIDIRECTIONAL);
1054
1055 return edesc;
1056}
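/*
 * Sizing example (illustrative only, assuming an 8-byte struct talitos_ptr):
 * for src_nents = 3, dst_nents = 2 and a 12-byte ICV,
 * dma_len = (3 + 2 + 1) * 8 + 12 = 60 and alloc_len is
 * sizeof(struct ipsec_esp_edesc) + 60.  With single-segment src and dst
 * (both counts forced to 0 above) no link table is needed, and only
 * icv_stashing adds ctx->authsize bytes for the stashed ICV.
 */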
1057
1058static int aead_authenc_encrypt(struct aead_request *req)
1059{
1060 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1061 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1062 struct ipsec_esp_edesc *edesc;
1063
1064 /* allocate extended descriptor */
1065 edesc = ipsec_esp_edesc_alloc(req, 0);
1066 if (IS_ERR(edesc))
1067 return PTR_ERR(edesc);
1068
1069 /* set encrypt */
1070 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1071
1072 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1073}
1074
1075static int aead_authenc_decrypt(struct aead_request *req)
1076{
1077 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1078 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1079 unsigned int authsize = ctx->authsize;
1080 struct ipsec_esp_edesc *edesc;
1081 struct scatterlist *sg;
1082 void *icvdata;
1083
1084 req->cryptlen -= authsize;
1085
1086 /* allocate extended descriptor */
1087 edesc = ipsec_esp_edesc_alloc(req, 1);
1088 if (IS_ERR(edesc))
1089 return PTR_ERR(edesc);
1090
1091 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1092 if (edesc->dma_len)
1093 icvdata = &edesc->link_tbl[edesc->src_nents +
1094 edesc->dst_nents + 1];
1095 else
1096 icvdata = &edesc->link_tbl[0];
1097
1098 sg = sg_last(req->src, edesc->src_nents ? : 1);
1099
1100 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1101 ctx->authsize);
1102
1103 /* decrypt */
1104 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1105
1106 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done);
1107}
1108
1109static int aead_authenc_givencrypt(
1110 struct aead_givcrypt_request *req)
1111{
1112 struct aead_request *areq = &req->areq;
1113 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1114 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1115 struct ipsec_esp_edesc *edesc;
1116
1117 /* allocate extended descriptor */
1118 edesc = ipsec_esp_edesc_alloc(areq, 0);
1119 if (IS_ERR(edesc))
1120 return PTR_ERR(edesc);
1121
1122 /* set encrypt */
1123 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1124
1125 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1126
1127 return ipsec_esp(edesc, areq, req->giv, req->seq,
1128 ipsec_esp_encrypt_done);
1129}
1130
1131struct talitos_alg_template {
1132 char name[CRYPTO_MAX_ALG_NAME];
1133 char driver_name[CRYPTO_MAX_ALG_NAME];
1134 unsigned int blocksize;
1135 struct aead_alg aead;
1136 struct device *dev;
1137 __be32 desc_hdr_template;
1138};
1139
1140static struct talitos_alg_template driver_algs[] = {
1141 /* single-pass ipsec_esp descriptor */
1142 {
1143 .name = "authenc(hmac(sha1),cbc(aes))",
1144 .driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1145 .blocksize = AES_BLOCK_SIZE,
1146 .aead = {
1147 .setkey = aead_authenc_setkey,
1148 .setauthsize = aead_authenc_setauthsize,
1149 .encrypt = aead_authenc_encrypt,
1150 .decrypt = aead_authenc_decrypt,
1151 .givencrypt = aead_authenc_givencrypt,
1152 .geniv = "<built-in>",
1153 .ivsize = AES_BLOCK_SIZE,
1154 .maxauthsize = SHA1_DIGEST_SIZE,
1155 },
1156 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1157 DESC_HDR_SEL0_AESU |
1158 DESC_HDR_MODE0_AESU_CBC |
1159 DESC_HDR_SEL1_MDEUA |
1160 DESC_HDR_MODE1_MDEU_INIT |
1161 DESC_HDR_MODE1_MDEU_PAD |
1162 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1163 },
1164 {
1165 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1166 .driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1167 .blocksize = DES3_EDE_BLOCK_SIZE,
1168 .aead = {
1169 .setkey = aead_authenc_setkey,
1170 .setauthsize = aead_authenc_setauthsize,
1171 .encrypt = aead_authenc_encrypt,
1172 .decrypt = aead_authenc_decrypt,
1173 .givencrypt = aead_authenc_givencrypt,
1174 .geniv = "<built-in>",
1175 .ivsize = DES3_EDE_BLOCK_SIZE,
1176 .maxauthsize = SHA1_DIGEST_SIZE,
1177 },
1178 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1179 DESC_HDR_SEL0_DEU |
1180 DESC_HDR_MODE0_DEU_CBC |
1181 DESC_HDR_MODE0_DEU_3DES |
1182 DESC_HDR_SEL1_MDEUA |
1183 DESC_HDR_MODE1_MDEU_INIT |
1184 DESC_HDR_MODE1_MDEU_PAD |
1185 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1186 },
1187 {
1188 .name = "authenc(hmac(sha256),cbc(aes))",
1189 .driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1190 .blocksize = AES_BLOCK_SIZE,
1191 .aead = {
1192 .setkey = aead_authenc_setkey,
1193 .setauthsize = aead_authenc_setauthsize,
1194 .encrypt = aead_authenc_encrypt,
1195 .decrypt = aead_authenc_decrypt,
1196 .givencrypt = aead_authenc_givencrypt,
1197 .geniv = "<built-in>",
1198 .ivsize = AES_BLOCK_SIZE,
1199 .maxauthsize = SHA256_DIGEST_SIZE,
1200 },
1201 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1202 DESC_HDR_SEL0_AESU |
1203 DESC_HDR_MODE0_AESU_CBC |
1204 DESC_HDR_SEL1_MDEUA |
1205 DESC_HDR_MODE1_MDEU_INIT |
1206 DESC_HDR_MODE1_MDEU_PAD |
1207 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1208 },
1209 {
1210 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1211 .driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1212 .blocksize = DES3_EDE_BLOCK_SIZE,
1213 .aead = {
1214 .setkey = aead_authenc_setkey,
1215 .setauthsize = aead_authenc_setauthsize,
1216 .encrypt = aead_authenc_encrypt,
1217 .decrypt = aead_authenc_decrypt,
1218 .givencrypt = aead_authenc_givencrypt,
1219 .geniv = "<built-in>",
1220 .ivsize = DES3_EDE_BLOCK_SIZE,
1221 .maxauthsize = SHA256_DIGEST_SIZE,
1222 },
1223 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1224 DESC_HDR_SEL0_DEU |
1225 DESC_HDR_MODE0_DEU_CBC |
1226 DESC_HDR_MODE0_DEU_3DES |
1227 DESC_HDR_SEL1_MDEUA |
1228 DESC_HDR_MODE1_MDEU_INIT |
1229 DESC_HDR_MODE1_MDEU_PAD |
1230 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1231 },
1232 {
1233 .name = "authenc(hmac(md5),cbc(aes))",
1234 .driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1235 .blocksize = AES_BLOCK_SIZE,
1236 .aead = {
1237 .setkey = aead_authenc_setkey,
1238 .setauthsize = aead_authenc_setauthsize,
1239 .encrypt = aead_authenc_encrypt,
1240 .decrypt = aead_authenc_decrypt,
1241 .givencrypt = aead_authenc_givencrypt,
1242 .geniv = "<built-in>",
1243 .ivsize = AES_BLOCK_SIZE,
1244 .maxauthsize = MD5_DIGEST_SIZE,
1245 },
1246 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1247 DESC_HDR_SEL0_AESU |
1248 DESC_HDR_MODE0_AESU_CBC |
1249 DESC_HDR_SEL1_MDEUA |
1250 DESC_HDR_MODE1_MDEU_INIT |
1251 DESC_HDR_MODE1_MDEU_PAD |
1252 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1253 },
1254 {
1255 .name = "authenc(hmac(md5),cbc(des3_ede))",
1256 .driver_name = "authenc-hmac-md5-cbc-3des-talitos",
1257 .blocksize = DES3_EDE_BLOCK_SIZE,
1258 .aead = {
1259 .setkey = aead_authenc_setkey,
1260 .setauthsize = aead_authenc_setauthsize,
1261 .encrypt = aead_authenc_encrypt,
1262 .decrypt = aead_authenc_decrypt,
1263 .givencrypt = aead_authenc_givencrypt,
1264 .geniv = "<built-in>",
1265 .ivsize = DES3_EDE_BLOCK_SIZE,
1266 .maxauthsize = MD5_DIGEST_SIZE,
1267 },
1268 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1269 DESC_HDR_SEL0_DEU |
1270 DESC_HDR_MODE0_DEU_CBC |
1271 DESC_HDR_MODE0_DEU_3DES |
1272 DESC_HDR_SEL1_MDEUA |
1273 DESC_HDR_MODE1_MDEU_INIT |
1274 DESC_HDR_MODE1_MDEU_PAD |
1275 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1276 }
1277};
1278
1279struct talitos_crypto_alg {
1280 struct list_head entry;
1281 struct device *dev;
1282 __be32 desc_hdr_template;
1283 struct crypto_alg crypto_alg;
1284};
1285
1286static int talitos_cra_init(struct crypto_tfm *tfm)
1287{
1288 struct crypto_alg *alg = tfm->__crt_alg;
1289 struct talitos_crypto_alg *talitos_alg =
1290 container_of(alg, struct talitos_crypto_alg, crypto_alg);
1291 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
1292
1293 /* update context with ptr to dev */
1294 ctx->dev = talitos_alg->dev;
1295 /* copy descriptor header template value */
1296 ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
1297
1298 /* random first IV */
1299 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
1300
1301 return 0;
1302}
1303
1304/*
1305 * given the alg's descriptor header template, determine whether the
1306 * descriptor type and the required primary/secondary execution units
1307 * match the h/w capabilities described in the device tree node.
1308 */
1309static int hw_supports(struct device *dev, __be32 desc_hdr_template)
1310{
1311 struct talitos_private *priv = dev_get_drvdata(dev);
1312 int ret;
1313
1314 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
1315 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
1316
1317 if (SECONDARY_EU(desc_hdr_template))
1318 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
1319 & priv->exec_units);
1320
1321 return ret;
1322}
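/*
 * Example check (illustrative, assuming DESC_TYPE()/PRIMARY_EU()/
 * SECONDARY_EU() extract the corresponding header fields): for the
 * AES-CBC + HMAC-SHA1 template below, the descriptor type is IPSEC_ESP (1)
 * and the primary EU is the AESU, so bit (1 << 1) must be set in
 * fsl,descriptor-types-mask and the AESU bit in fsl,exec-units-mask;
 * DESC_HDR_SEL1_MDEUA additionally requires the MDEU-A bit.
 */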
1323
1324static int __devexit talitos_remove(struct of_device *ofdev)
1325{
1326 struct device *dev = &ofdev->dev;
1327 struct talitos_private *priv = dev_get_drvdata(dev);
1328 struct talitos_crypto_alg *t_alg, *n;
1329 int i;
1330
1331 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1332 crypto_unregister_alg(&t_alg->crypto_alg);
1333 list_del(&t_alg->entry);
1334 kfree(t_alg);
1335 }
1336
1337 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1338 talitos_unregister_rng(dev);
1339
1340 kfree(priv->tail);
1341 kfree(priv->head);
1342
1343 if (priv->fifo)
1344 for (i = 0; i < priv->num_channels; i++)
1345 kfree(priv->fifo[i]);
1346
1347 kfree(priv->fifo);
1348 kfree(priv->head_lock);
1349 kfree(priv->tail_lock);
1350
1351 if (priv->irq != NO_IRQ) {
1352 free_irq(priv->irq, dev);
1353 irq_dispose_mapping(priv->irq);
1354 }
1355
1356 tasklet_kill(&priv->done_task);
1357 tasklet_kill(&priv->error_task);
1358
1359 iounmap(priv->reg);
1360
1361 dev_set_drvdata(dev, NULL);
1362
1363 kfree(priv);
1364
1365 return 0;
1366}
1367
1368static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1369 struct talitos_alg_template
1370 *template)
1371{
1372 struct talitos_crypto_alg *t_alg;
1373 struct crypto_alg *alg;
1374
1375 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
1376 if (!t_alg)
1377 return ERR_PTR(-ENOMEM);
1378
1379 alg = &t_alg->crypto_alg;
1380
1381 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1382 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1383 template->driver_name);
1384 alg->cra_module = THIS_MODULE;
1385 alg->cra_init = talitos_cra_init;
1386 alg->cra_priority = TALITOS_CRA_PRIORITY;
1387 alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
1388 alg->cra_blocksize = template->blocksize;
1389 alg->cra_alignmask = 0;
1390 alg->cra_type = &crypto_aead_type;
1391 alg->cra_ctxsize = sizeof(struct talitos_ctx);
1392 alg->cra_u.aead = template->aead;
1393
1394 t_alg->desc_hdr_template = template->desc_hdr_template;
1395 t_alg->dev = dev;
1396
1397 return t_alg;
1398}
1399
1400static int talitos_probe(struct of_device *ofdev,
1401 const struct of_device_id *match)
1402{
1403 struct device *dev = &ofdev->dev;
1404 struct device_node *np = ofdev->node;
1405 struct talitos_private *priv;
1406 const unsigned int *prop;
1407 int i, err;
1408
1409 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
1410 if (!priv)
1411 return -ENOMEM;
1412
1413 dev_set_drvdata(dev, priv);
1414
1415 priv->ofdev = ofdev;
1416
1417 tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
1418 tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);
1419
1420 priv->irq = irq_of_parse_and_map(np, 0);
1421
1422 if (priv->irq == NO_IRQ) {
1423 dev_err(dev, "failed to map irq\n");
1424 err = -EINVAL;
1425 goto err_out;
1426 }
1427
1428 /* get the irq line */
1429 err = request_irq(priv->irq, talitos_interrupt, 0,
1430 dev_driver_string(dev), dev);
1431 if (err) {
1432 dev_err(dev, "failed to request irq %d\n", priv->irq);
1433 irq_dispose_mapping(priv->irq);
1434 priv->irq = NO_IRQ;
1435 goto err_out;
1436 }
1437
1438 priv->reg = of_iomap(np, 0);
1439 if (!priv->reg) {
1440 dev_err(dev, "failed to of_iomap\n");
1441 err = -ENOMEM;
1442 goto err_out;
1443 }
1444
1445 /* get SEC version capabilities from device tree */
1446 prop = of_get_property(np, "fsl,num-channels", NULL);
1447 if (prop)
1448 priv->num_channels = *prop;
1449
1450 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
1451 if (prop)
1452 priv->chfifo_len = *prop;
1453
1454 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
1455 if (prop)
1456 priv->exec_units = *prop;
1457
1458 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
1459 if (prop)
1460 priv->desc_types = *prop;
1461
1462 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
1463 !priv->exec_units || !priv->desc_types) {
1464 dev_err(dev, "invalid property data in device tree node\n");
1465 err = -EINVAL;
1466 goto err_out;
1467 }
1468
1469 of_node_put(np);
1470 np = NULL;
1471
1472 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1473 GFP_KERNEL);
1474 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1475 GFP_KERNEL);
1476 if (!priv->head_lock || !priv->tail_lock) {
1477 dev_err(dev, "failed to allocate fifo locks\n");
1478 err = -ENOMEM;
1479 goto err_out;
1480 }
1481
1482 for (i = 0; i < priv->num_channels; i++) {
1483 spin_lock_init(&priv->head_lock[i]);
1484 spin_lock_init(&priv->tail_lock[i]);
1485 }
1486
1487 priv->fifo = kmalloc(sizeof(struct talitos_request *) *
1488 priv->num_channels, GFP_KERNEL);
1489 if (!priv->fifo) {
1490 dev_err(dev, "failed to allocate request fifo\n");
1491 err = -ENOMEM;
1492 goto err_out;
1493 }
1494
1495 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
1496
1497 for (i = 0; i < priv->num_channels; i++) {
1498 priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
1499 priv->fifo_len, GFP_KERNEL);
1500 if (!priv->fifo[i]) {
1501 dev_err(dev, "failed to allocate request fifo %d\n", i);
1502 err = -ENOMEM;
1503 goto err_out;
1504 }
1505 }
1506
1507 priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1508 priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1509 if (!priv->head || !priv->tail) {
1510 dev_err(dev, "failed to allocate request index space\n");
1511 err = -ENOMEM;
1512 goto err_out;
1513 }
1514
1515 /* reset and initialize the h/w */
1516 err = init_device(dev);
1517 if (err) {
1518 dev_err(dev, "failed to initialize device\n");
1519 goto err_out;
1520 }
1521
1522 /* register the RNG, if available */
1523 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
1524 err = talitos_register_rng(dev);
1525 if (err) {
1526 dev_err(dev, "failed to register hwrng: %d\n", err);
1527 goto err_out;
1528 } else
1529 dev_info(dev, "hwrng\n");
1530 }
1531
1532 /* register crypto algorithms the device supports */
1533 INIT_LIST_HEAD(&priv->alg_list);
1534
1535 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1536 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
1537 struct talitos_crypto_alg *t_alg;
1538
1539 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
1540 if (IS_ERR(t_alg)) {
1541 err = PTR_ERR(t_alg);
1542 goto err_out;
1543 }
1544
1545 err = crypto_register_alg(&t_alg->crypto_alg);
1546 if (err) {
1547 dev_err(dev, "%s alg registration failed\n",
1548 t_alg->crypto_alg.cra_driver_name);
1549 kfree(t_alg);
1550 } else {
1551 list_add_tail(&t_alg->entry, &priv->alg_list);
1552 dev_info(dev, "%s\n",
1553 t_alg->crypto_alg.cra_driver_name);
1554 }
1555 }
1556 }
1557
1558 return 0;
1559
1560err_out:
1561 talitos_remove(ofdev);
1562 if (np)
1563 of_node_put(np);
1564
1565 return err;
1566}
1567
1568static struct of_device_id talitos_match[] = {
1569 {
1570 .compatible = "fsl,sec2.0",
1571 },
1572 {},
1573};
1574MODULE_DEVICE_TABLE(of, talitos_match);
1575
1576static struct of_platform_driver talitos_driver = {
1577 .name = "talitos",
1578 .match_table = talitos_match,
1579 .probe = talitos_probe,
1580 .remove = __devexit_p(talitos_remove),
1581};
1582
1583static int __init talitos_init(void)
1584{
1585 return of_register_platform_driver(&talitos_driver);
1586}
1587module_init(talitos_init);
1588
1589static void __exit talitos_exit(void)
1590{
1591 of_unregister_platform_driver(&talitos_driver);
1592}
1593module_exit(talitos_exit);
1594
1595MODULE_LICENSE("GPL");
1596MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
1597MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
new file mode 100644
index 000000000000..c48a405abf70
--- /dev/null
+++ b/drivers/crypto/talitos.h
@@ -0,0 +1,199 @@
1/*
2 * Freescale SEC (talitos) device register and descriptor header defines
3 *
4 * Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 */
30
31/*
32 * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register
33 */
34
35/* global register offset addresses */
36#define TALITOS_MCR 0x1030 /* master control register */
37#define TALITOS_MCR_LO 0x1038
38#define TALITOS_MCR_SWR 0x1 /* s/w reset */
39#define TALITOS_IMR 0x1008 /* interrupt mask register */
40#define TALITOS_IMR_INIT 0x10fff /* enable channel IRQs */
41#define TALITOS_IMR_LO 0x100C
42#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */
43#define TALITOS_ISR 0x1010 /* interrupt status register */
44#define TALITOS_ISR_CHERR 0xaa /* channel errors mask */
45#define TALITOS_ISR_CHDONE 0x55 /* channel done mask */
46#define TALITOS_ISR_LO 0x1014
47#define TALITOS_ICR 0x1018 /* interrupt clear register */
48#define TALITOS_ICR_LO 0x101C
49
50/* channel register address stride */
51#define TALITOS_CH_STRIDE 0x100
52
53/* channel configuration register */
54#define TALITOS_CCCR(ch) (ch * TALITOS_CH_STRIDE + 0x1108)
55#define TALITOS_CCCR_CONT 0x2 /* channel continue */
56#define TALITOS_CCCR_RESET 0x1 /* channel reset */
57#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
58#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
59#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
60#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
61
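/*
 * Offset arithmetic example (illustrative only): with TALITOS_CH_STRIDE of
 * 0x100, channel 2's configuration registers sit at
 *   TALITOS_CCCR(2)    = 2 * 0x100 + 0x1108 = 0x1308
 *   TALITOS_CCCR_LO(2) = 2 * 0x100 + 0x110c = 0x130c
 * and every per-channel register below follows the same stride pattern.
 */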
62/* CCPSR: channel pointer status register */
63#define TALITOS_CCPSR(ch) (ch * TALITOS_CH_STRIDE + 0x1110)
64#define TALITOS_CCPSR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1114)
65#define TALITOS_CCPSR_LO_DOF 0x8000 /* double FF write oflow error */
66#define TALITOS_CCPSR_LO_SOF 0x4000 /* single FF write oflow error */
67#define TALITOS_CCPSR_LO_MDTE 0x2000 /* master data transfer error */
68#define TALITOS_CCPSR_LO_SGDLZ 0x1000 /* s/g data len zero error */
69#define TALITOS_CCPSR_LO_FPZ 0x0800 /* fetch ptr zero error */
70#define TALITOS_CCPSR_LO_IDH 0x0400 /* illegal desc hdr error */
71#define TALITOS_CCPSR_LO_IEU 0x0200 /* invalid EU error */
72#define TALITOS_CCPSR_LO_EU 0x0100 /* EU error detected */
73#define TALITOS_CCPSR_LO_GB 0x0080 /* gather boundary error */
74#define TALITOS_CCPSR_LO_GRL 0x0040 /* gather return/length error */
75#define TALITOS_CCPSR_LO_SB 0x0020 /* scatter boundary error */
76#define TALITOS_CCPSR_LO_SRL 0x0010 /* scatter return/length error */
77
78/* channel fetch fifo register */
79#define TALITOS_FF(ch) (ch * TALITOS_CH_STRIDE + 0x1148)
80#define TALITOS_FF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x114c)
81
82/* current descriptor pointer register */
83#define TALITOS_CDPR(ch) (ch * TALITOS_CH_STRIDE + 0x1140)
84#define TALITOS_CDPR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1144)
85
86/* descriptor buffer register */
87#define TALITOS_DESCBUF(ch) (ch * TALITOS_CH_STRIDE + 0x1180)
88#define TALITOS_DESCBUF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1184)
89
90/* gather link table */
91#define TALITOS_GATHER(ch) (ch * TALITOS_CH_STRIDE + 0x11c0)
92#define TALITOS_GATHER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11c4)
93
94/* scatter link table */
95#define TALITOS_SCATTER(ch) (ch * TALITOS_CH_STRIDE + 0x11e0)
96#define TALITOS_SCATTER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11e4)
97
98/* execution unit interrupt status registers */
99#define TALITOS_DEUISR 0x2030 /* DES unit */
100#define TALITOS_DEUISR_LO 0x2034
101#define TALITOS_AESUISR 0x4030 /* AES unit */
102#define TALITOS_AESUISR_LO 0x4034
103#define TALITOS_MDEUISR 0x6030 /* message digest unit */
104#define TALITOS_MDEUISR_LO 0x6034
105#define TALITOS_AFEUISR 0x8030 /* arc4 unit */
106#define TALITOS_AFEUISR_LO 0x8034
107#define TALITOS_RNGUISR 0xa030 /* random number unit */
108#define TALITOS_RNGUISR_LO 0xa034
109#define TALITOS_RNGUSR 0xa028 /* rng status */
110#define TALITOS_RNGUSR_LO 0xa02c
111#define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */
112#define TALITOS_RNGUSR_LO_OFL 0xff0000 /* output FIFO length */
113#define TALITOS_RNGUDSR 0xa010 /* data size */
114#define TALITOS_RNGUDSR_LO 0xa014
115#define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */
116#define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */
117#define TALITOS_RNGURCR 0xa018 /* reset control */
118#define TALITOS_RNGURCR_LO 0xa01c
119#define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */
120#define TALITOS_PKEUISR 0xc030 /* public key unit */
121#define TALITOS_PKEUISR_LO 0xc034
122#define TALITOS_KEUISR 0xe030 /* kasumi unit */
123#define TALITOS_KEUISR_LO 0xe034
124#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit */
125#define TALITOS_CRCUISR_LO 0xf034
126
127/*
128 * talitos descriptor header (hdr) bits
129 */
130
131/* written back when done */
132#define DESC_HDR_DONE __constant_cpu_to_be32(0xff000000)
133
134/* primary execution unit select */
135#define DESC_HDR_SEL0_MASK __constant_cpu_to_be32(0xf0000000)
136#define DESC_HDR_SEL0_AFEU __constant_cpu_to_be32(0x10000000)
137#define DESC_HDR_SEL0_DEU __constant_cpu_to_be32(0x20000000)
138#define DESC_HDR_SEL0_MDEUA __constant_cpu_to_be32(0x30000000)
139#define DESC_HDR_SEL0_MDEUB __constant_cpu_to_be32(0xb0000000)
140#define DESC_HDR_SEL0_RNG __constant_cpu_to_be32(0x40000000)
141#define DESC_HDR_SEL0_PKEU __constant_cpu_to_be32(0x50000000)
142#define DESC_HDR_SEL0_AESU __constant_cpu_to_be32(0x60000000)
143#define DESC_HDR_SEL0_KEU __constant_cpu_to_be32(0x70000000)
144#define DESC_HDR_SEL0_CRCU __constant_cpu_to_be32(0x80000000)
145
146/* primary execution unit mode (MODE0) and derivatives */
147#define DESC_HDR_MODE0_ENCRYPT __constant_cpu_to_be32(0x00100000)
148#define DESC_HDR_MODE0_AESU_CBC __constant_cpu_to_be32(0x00200000)
149#define DESC_HDR_MODE0_DEU_CBC __constant_cpu_to_be32(0x00400000)
150#define DESC_HDR_MODE0_DEU_3DES __constant_cpu_to_be32(0x00200000)
151#define DESC_HDR_MODE0_MDEU_INIT __constant_cpu_to_be32(0x01000000)
152#define DESC_HDR_MODE0_MDEU_HMAC __constant_cpu_to_be32(0x00800000)
153#define DESC_HDR_MODE0_MDEU_PAD __constant_cpu_to_be32(0x00400000)
154#define DESC_HDR_MODE0_MDEU_MD5 __constant_cpu_to_be32(0x00200000)
155#define DESC_HDR_MODE0_MDEU_SHA256 __constant_cpu_to_be32(0x00100000)
156#define DESC_HDR_MODE0_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
157#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
158 DESC_HDR_MODE0_MDEU_HMAC)
159#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
160 DESC_HDR_MODE0_MDEU_HMAC)
161#define DESC_HDR_MODE0_MDEU_SHA1_HMAC (DESC_HDR_MODE0_MDEU_SHA1 | \
162 DESC_HDR_MODE0_MDEU_HMAC)
163
164/* secondary execution unit select (SEL1) */
165#define DESC_HDR_SEL1_MASK __constant_cpu_to_be32(0x000f0000)
166#define DESC_HDR_SEL1_MDEUA __constant_cpu_to_be32(0x00030000)
167#define DESC_HDR_SEL1_MDEUB __constant_cpu_to_be32(0x000b0000)
168#define DESC_HDR_SEL1_CRCU __constant_cpu_to_be32(0x00080000)
169
170/* secondary execution unit mode (MODE1) and derivatives */
171#define DESC_HDR_MODE1_MDEU_INIT __constant_cpu_to_be32(0x00001000)
172#define DESC_HDR_MODE1_MDEU_HMAC __constant_cpu_to_be32(0x00000800)
173#define DESC_HDR_MODE1_MDEU_PAD __constant_cpu_to_be32(0x00000400)
174#define DESC_HDR_MODE1_MDEU_MD5 __constant_cpu_to_be32(0x00000200)
175#define DESC_HDR_MODE1_MDEU_SHA256 __constant_cpu_to_be32(0x00000100)
176#define DESC_HDR_MODE1_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
177#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
178 DESC_HDR_MODE1_MDEU_HMAC)
179#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
180 DESC_HDR_MODE1_MDEU_HMAC)
181#define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \
182 DESC_HDR_MODE1_MDEU_HMAC)
183
184/* direction of overall data flow (DIR) */
185#define DESC_HDR_DIR_INBOUND __constant_cpu_to_be32(0x00000002)
186
187/* request done notification (DN) */
188#define DESC_HDR_DONE_NOTIFY __constant_cpu_to_be32(0x00000001)
189
190/* descriptor types */
191#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP __constant_cpu_to_be32(0 << 3)
192#define DESC_HDR_TYPE_IPSEC_ESP __constant_cpu_to_be32(1 << 3)
193#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU __constant_cpu_to_be32(2 << 3)
194#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU __constant_cpu_to_be32(4 << 3)
195
196/* link table extent field bits */
197#define DESC_PTR_LNKTBL_JUMP 0x80
198#define DESC_PTR_LNKTBL_RETURN 0x02
199#define DESC_PTR_LNKTBL_NEXT 0x01
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index dc2cec6127d1..ebb9e51deb0c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -26,6 +26,16 @@ config EDD_OFF
 	  kernel. Say N if you want EDD enabled by default. EDD can be dynamically set
 	  using the kernel parameter 'edd={on|skipmbr|off}'.
 
+config FIRMWARE_MEMMAP
+	bool "Add firmware-provided memory map to sysfs" if EMBEDDED
+	default (X86_64 || X86_32)
+	help
+	  Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
+	  That memory map is used for example by kexec to set up parameter area
+	  for the next kernel, but can also be used for debugging purposes.
+
+	  See also Documentation/ABI/testing/sysfs-firmware-memmap.
+
 config EFI_VARS
 	tristate "EFI Variable Support via sysfs"
 	depends on EFI
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 4c9147154df8..1c3c17343dbe 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_DCDBAS) += dcdbas.o
 obj-$(CONFIG_DMIID) += dmi-id.o
 obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
 obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
+obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index c5e3ed7e903b..455575be3560 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -8,6 +8,11 @@
 #include <linux/slab.h>
 #include <asm/dmi.h>
 
+/*
+ * DMI stands for "Desktop Management Interface".  It is part of,
+ * and an antecedent to, SMBIOS, which stands for System
+ * Management BIOS.  See further: http://www.dmtf.org/standards
+ */
 static char dmi_empty_string[] = " ";
 
 static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
new file mode 100644
index 000000000000..e23399c7f773
--- /dev/null
+++ b/drivers/firmware/memmap.c
@@ -0,0 +1,205 @@
1/*
2 * linux/drivers/firmware/memmap.c
3 * Copyright (C) 2008 SUSE LINUX Products GmbH
4 * by Bernhard Walle <bwalle@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License v2.0 as published by
8 * the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/string.h>
18#include <linux/firmware-map.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/bootmem.h>
23
24/*
25 * Data types ------------------------------------------------------------------
26 */
27
28/*
29 * Firmware map entry. Because firmware memory maps are flat and not
30 * hierarchical, it's ok to organise them in a linked list. No parent
31 * information is necessary, unlike in the resource tree.
32 */
33struct firmware_map_entry {
34 resource_size_t start; /* start of the memory range */
35 resource_size_t end; /* end of the memory range (incl.) */
36 const char *type; /* type of the memory range */
37 struct list_head list; /* entry for the linked list */
38 struct kobject kobj; /* kobject for each entry */
39};
40
41/*
42 * Forward declarations --------------------------------------------------------
43 */
44static ssize_t memmap_attr_show(struct kobject *kobj,
45 struct attribute *attr, char *buf);
46static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
47static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
48static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
49
50/*
51 * Static data -----------------------------------------------------------------
52 */
53
54struct memmap_attribute {
55 struct attribute attr;
56 ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
57};
58
59struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
60struct memmap_attribute memmap_end_attr = __ATTR_RO(end);
61struct memmap_attribute memmap_type_attr = __ATTR_RO(type);
62
63/*
64 * These are default attributes that are added for every memmap entry.
65 */
66static struct attribute *def_attrs[] = {
67 &memmap_start_attr.attr,
68 &memmap_end_attr.attr,
69 &memmap_type_attr.attr,
70 NULL
71};
72
73static struct sysfs_ops memmap_attr_ops = {
74 .show = memmap_attr_show,
75};
76
77static struct kobj_type memmap_ktype = {
78 .sysfs_ops = &memmap_attr_ops,
79 .default_attrs = def_attrs,
80};
81
82/*
83 * Registration functions ------------------------------------------------------
84 */
85
86/*
87 * Firmware memory map entries
88 */
89static LIST_HEAD(map_entries);
90
91/**
92 * Common implementation of firmware_map_add() and firmware_map_add_early()
93 * which expects a pre-allocated struct firmware_map_entry.
94 *
95 * @start: Start of the memory range.
96 * @end: End of the memory range (inclusive).
97 * @type: Type of the memory range.
98 * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
99 * entry.
100 */
101static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
102 const char *type,
103 struct firmware_map_entry *entry)
104{
105 BUG_ON(start > end);
106
107 entry->start = start;
108 entry->end = end;
109 entry->type = type;
110 INIT_LIST_HEAD(&entry->list);
111 kobject_init(&entry->kobj, &memmap_ktype);
112
113 list_add_tail(&entry->list, &map_entries);
114
115 return 0;
116}
117
118/*
119 * See <linux/firmware-map.h> for documentation.
120 */
121int firmware_map_add(resource_size_t start, resource_size_t end,
122 const char *type)
123{
124 struct firmware_map_entry *entry;
125
126 entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
127 WARN_ON(!entry);
128 if (!entry)
129 return -ENOMEM;
130
131 return firmware_map_add_entry(start, end, type, entry);
132}
133
134/*
135 * See <linux/firmware-map.h> for documentation.
136 */
137int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
138 const char *type)
139{
140 struct firmware_map_entry *entry;
141
142 entry = alloc_bootmem_low(sizeof(struct firmware_map_entry));
143 WARN_ON(!entry);
144 if (!entry)
145 return -ENOMEM;
146
147 return firmware_map_add_entry(start, end, type, entry);
148}
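/*
 * Usage sketch (hypothetical call site, for illustration only): an
 * architecture could publish a firmware-reported RAM range while parsing
 * its memory map during early boot, e.g.
 *
 *	firmware_map_add_early(0x100000, 0x7fffffff, "System RAM");
 *
 * Note that the end address is inclusive and the type string is not
 * copied, so it must remain valid for the lifetime of the entry.
 */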
149
150/*
151 * Sysfs functions -------------------------------------------------------------
152 */
153
154static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
155{
156 return snprintf(buf, PAGE_SIZE, "0x%llx\n", entry->start);
157}
158
159static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
160{
161 return snprintf(buf, PAGE_SIZE, "0x%llx\n", entry->end);
162}
163
164static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
165{
166 return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
167}
168
169#define to_memmap_attr(_attr) container_of(_attr, struct memmap_attribute, attr)
170#define to_memmap_entry(obj) container_of(obj, struct firmware_map_entry, kobj)
171
172static ssize_t memmap_attr_show(struct kobject *kobj,
173 struct attribute *attr, char *buf)
174{
175 struct firmware_map_entry *entry = to_memmap_entry(kobj);
176 struct memmap_attribute *memmap_attr = to_memmap_attr(attr);
177
178 return memmap_attr->show(entry, buf);
179}
180
181/*
182 * Creates the "memmap" kset and adds the entries in the map_entries list
183 * to sysfs. Note that firmware_map_add() and firmware_map_add_early()
184 * must be called before this late_initcall runs.
185 */
186static int __init memmap_init(void)
187{
188 int i = 0;
189 struct firmware_map_entry *entry;
190 struct kset *memmap_kset;
191
192 memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
193 WARN_ON(!memmap_kset);
194 if (!memmap_kset)
195 return -ENOMEM;
196
197 list_for_each_entry(entry, &map_entries, list) {
198 entry->kobj.kset = memmap_kset;
199 kobject_add(&entry->kobj, NULL, "%d", i++);
200 }
201
202 return 0;
203}
204late_initcall(memmap_init);
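/*
 * Resulting layout (per the sysfs-firmware-memmap ABI document referenced
 * from the Kconfig entry): each registered range appears as
 * /sys/firmware/memmap/<n>/ containing read-only "start", "end" and "type"
 * files, e.g. reading /sys/firmware/memmap/0/type might print
 * "System RAM" (example output only).
 */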
205
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
new file mode 100644
index 000000000000..de566cf0414c
--- /dev/null
+++ b/drivers/gpu/Makefile
@@ -0,0 +1 @@
obj-y += drm/
diff --git a/drivers/char/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 610d6fd5bb50..610d6fd5bb50 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
new file mode 100644
index 000000000000..e9f9a97ae00a
--- /dev/null
+++ b/drivers/gpu/drm/Makefile
@@ -0,0 +1,26 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6
7drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
8 drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
9 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
10 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
11 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
12
13drm-$(CONFIG_COMPAT) += drm_ioc32.o
14
15obj-$(CONFIG_DRM) += drm.o
16obj-$(CONFIG_DRM_TDFX) += tdfx/
17obj-$(CONFIG_DRM_R128) += r128/
18obj-$(CONFIG_DRM_RADEON)+= radeon/
19obj-$(CONFIG_DRM_MGA) += mga/
20obj-$(CONFIG_DRM_I810) += i810/
21obj-$(CONFIG_DRM_I830) += i830/
22obj-$(CONFIG_DRM_I915) += i915/
23obj-$(CONFIG_DRM_SIS) += sis/
24obj-$(CONFIG_DRM_SAVAGE)+= savage/
25obj-$(CONFIG_DRM_VIA) +=via/
26
diff --git a/drivers/char/drm/README.drm b/drivers/gpu/drm/README.drm
index b5b332722581..b5b332722581 100644
--- a/drivers/char/drm/README.drm
+++ b/drivers/gpu/drm/README.drm
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index c533d0c9ec61..c533d0c9ec61 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index aefa5ac4c0b1..aefa5ac4c0b1 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
diff --git a/drivers/char/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index a73462723d2d..a73462723d2d 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bde64b84166e..bde64b84166e 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
diff --git a/drivers/char/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index d505f695421f..d505f695421f 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
diff --git a/drivers/char/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 7a8e2fba4678..7a8e2fba4678 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
index 1839c57663c5..1839c57663c5 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/gpu/drm/drm_drawable.c
diff --git a/drivers/char/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 564138714bb5..564138714bb5 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
diff --git a/drivers/char/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index d2e6da85f58a..851a53f1acce 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -37,6 +37,7 @@
37#include "drmP.h" 37#include "drmP.h"
38#include "drm_sarea.h" 38#include "drm_sarea.h"
39#include <linux/poll.h> 39#include <linux/poll.h>
40#include <linux/smp_lock.h>
40 41
41static int drm_open_helper(struct inode *inode, struct file *filp, 42static int drm_open_helper(struct inode *inode, struct file *filp,
42 struct drm_device * dev); 43 struct drm_device * dev);
@@ -174,12 +175,14 @@ int drm_stub_open(struct inode *inode, struct file *filp)
174 175
175 DRM_DEBUG("\n"); 176 DRM_DEBUG("\n");
176 177
178 /* BKL pushdown: note that nothing else serializes idr_find() */
179 lock_kernel();
177 minor = idr_find(&drm_minors_idr, minor_id); 180 minor = idr_find(&drm_minors_idr, minor_id);
178 if (!minor) 181 if (!minor)
179 return -ENODEV; 182 goto out;
180 183
181 if (!(dev = minor->dev)) 184 if (!(dev = minor->dev))
182 return -ENODEV; 185 goto out;
183 186
184 old_fops = filp->f_op; 187 old_fops = filp->f_op;
185 filp->f_op = fops_get(&dev->driver->fops); 188 filp->f_op = fops_get(&dev->driver->fops);
@@ -189,6 +192,8 @@ int drm_stub_open(struct inode *inode, struct file *filp)
189 } 192 }
190 fops_put(old_fops); 193 fops_put(old_fops);
191 194
195out:
196 unlock_kernel();
192 return err; 197 return err;
193} 198}
194 199
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 33160673a7b7..33160673a7b7 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 90f5a8d9bdcb..90f5a8d9bdcb 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 16829fb3089d..16829fb3089d 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
diff --git a/drivers/char/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 089c015c01d1..089c015c01d1 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
diff --git a/drivers/char/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 0998723cde79..0998723cde79 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
diff --git a/drivers/char/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 845081b44f63..845081b44f63 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
diff --git a/drivers/char/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index dcff9e9b52e3..dcff9e9b52e3 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
diff --git a/drivers/char/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index b55d5bc6ea61..b55d5bc6ea61 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
diff --git a/drivers/char/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 93b1e0475c93..93b1e0475c93 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index b2b0f3d41714..b2b0f3d41714 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
diff --git a/drivers/char/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
index 926f146390ce..926f146390ce 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/gpu/drm/drm_sman.c
diff --git a/drivers/char/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c2f584f3b46c..c2f584f3b46c 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index af211a0ef179..af211a0ef179 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
diff --git a/drivers/char/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c234c6f24a8d..c234c6f24a8d 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
diff --git a/drivers/gpu/drm/i810/Makefile b/drivers/gpu/drm/i810/Makefile
new file mode 100644
index 000000000000..43844ecafcc5
--- /dev/null
+++ b/drivers/gpu/drm/i810/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6i810-y := i810_drv.o i810_dma.o
7
8obj-$(CONFIG_DRM_I810) += i810.o
diff --git a/drivers/char/drm/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index e5de8ea41544..e5de8ea41544 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
diff --git a/drivers/char/drm/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fabb9a817966..fabb9a817966 100644
--- a/drivers/char/drm/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
diff --git a/drivers/char/drm/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 0118849a5672..0118849a5672 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
new file mode 100644
index 000000000000..c642ee0b238c
--- /dev/null
+++ b/drivers/gpu/drm/i830/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6i830-y := i830_drv.o i830_dma.o i830_irq.o
7
8obj-$(CONFIG_DRM_I830) += i830.o
diff --git a/drivers/char/drm/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index a86ab30b4620..a86ab30b4620 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
diff --git a/drivers/char/drm/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 389597e4a623..389597e4a623 100644
--- a/drivers/char/drm/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
diff --git a/drivers/char/drm/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
index b5bf8cc0fdaa..b5bf8cc0fdaa 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/gpu/drm/i830/i830_drv.h
diff --git a/drivers/char/drm/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
index 91ec2bb497e9..91ec2bb497e9 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/gpu/drm/i830/i830_irq.c
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
new file mode 100644
index 000000000000..a9e60464df74
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
7
8i915-$(CONFIG_COMPAT) += i915_ioc32.o
9
10obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/char/drm/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 88974342933c..88974342933c 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
diff --git a/drivers/char/drm/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 93aed1c38bd2..93aed1c38bd2 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
diff --git a/drivers/char/drm/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d7326d92a237..d7326d92a237 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
diff --git a/drivers/char/drm/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 1fe68a251b75..1fe68a251b75 100644
--- a/drivers/char/drm/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
diff --git a/drivers/char/drm/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df036118b8b1..df036118b8b1 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
diff --git a/drivers/char/drm/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
index 6126a60dc9cb..6126a60dc9cb 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/gpu/drm/i915/i915_mem.c
diff --git a/drivers/gpu/drm/mga/Makefile b/drivers/gpu/drm/mga/Makefile
new file mode 100644
index 000000000000..60684785c203
--- /dev/null
+++ b/drivers/gpu/drm/mga/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6mga-y := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
7
8mga-$(CONFIG_COMPAT) += mga_ioc32.o
9
10obj-$(CONFIG_DRM_MGA) += mga.o
11
diff --git a/drivers/char/drm/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index c1d12dbfa8d8..c1d12dbfa8d8 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
diff --git a/drivers/char/drm/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 5572939fc7d1..5572939fc7d1 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
diff --git a/drivers/char/drm/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index f6ebd24bd587..f6ebd24bd587 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
diff --git a/drivers/char/drm/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 30d00478ddee..30d00478ddee 100644
--- a/drivers/char/drm/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
diff --git a/drivers/char/drm/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 9302cb8f0f83..9302cb8f0f83 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
diff --git a/drivers/char/drm/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index d3f8aade07b3..d3f8aade07b3 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
diff --git a/drivers/char/drm/mga_ucode.h b/drivers/gpu/drm/mga/mga_ucode.h
index b611e27470e1..b611e27470e1 100644
--- a/drivers/char/drm/mga_ucode.h
+++ b/drivers/gpu/drm/mga/mga_ucode.h
diff --git a/drivers/char/drm/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 651b93c8ab5d..651b93c8ab5d 100644
--- a/drivers/char/drm/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
new file mode 100644
index 000000000000..1cc72ae3a880
--- /dev/null
+++ b/drivers/gpu/drm/r128/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o
7
8r128-$(CONFIG_COMPAT) += r128_ioc32.o
9
10obj-$(CONFIG_DRM_R128) += r128.o
diff --git a/drivers/char/drm/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index c31afbde62e7..c31afbde62e7 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
diff --git a/drivers/char/drm/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 6108e7587e12..6108e7587e12 100644
--- a/drivers/char/drm/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
diff --git a/drivers/char/drm/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 011105e51ac6..011105e51ac6 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
diff --git a/drivers/char/drm/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index d3cb676eee84..d3cb676eee84 100644
--- a/drivers/char/drm/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
diff --git a/drivers/char/drm/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index c76fdca7662d..c76fdca7662d 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
diff --git a/drivers/char/drm/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 51a9afce7b9b..51a9afce7b9b 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
new file mode 100644
index 000000000000..feb521ebc393
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
7
8radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
9
10obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 702df45320f7..702df45320f7 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
diff --git a/drivers/char/drm/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index a6802f26afc4..a6802f26afc4 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index e53158f0ecb5..e53158f0ecb5 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 349ac3d3b848..349ac3d3b848 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 3f0eca957aa7..3f0eca957aa7 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
diff --git a/drivers/char/drm/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index 56decda2a71f..56decda2a71f 100644
--- a/drivers/char/drm/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index ee40d197deb7..ee40d197deb7 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index 4af5286a36fb..4af5286a36fb 100644
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
diff --git a/drivers/char/drm/radeon_microcode.h b/drivers/gpu/drm/radeon/radeon_microcode.h
index a348c9e7db1c..a348c9e7db1c 100644
--- a/drivers/char/drm/radeon_microcode.h
+++ b/drivers/gpu/drm/radeon/radeon_microcode.h
diff --git a/drivers/char/drm/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 11c146b49211..11c146b49211 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
diff --git a/drivers/gpu/drm/savage/Makefile b/drivers/gpu/drm/savage/Makefile
new file mode 100644
index 000000000000..d8f84ac7bb26
--- /dev/null
+++ b/drivers/gpu/drm/savage/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y = -Iinclude/drm
+savage-y := savage_drv.o savage_bci.o savage_state.o
+
+obj-$(CONFIG_DRM_SAVAGE)+= savage.o
+
diff --git a/drivers/char/drm/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index d465b2f9c1cd..d465b2f9c1cd 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
diff --git a/drivers/char/drm/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index eee52aa92a7c..eee52aa92a7c 100644
--- a/drivers/char/drm/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
diff --git a/drivers/char/drm/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index df2aac6636f7..df2aac6636f7 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
diff --git a/drivers/char/drm/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 5f6238fdf1fa..5f6238fdf1fa 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
diff --git a/drivers/gpu/drm/sis/Makefile b/drivers/gpu/drm/sis/Makefile
new file mode 100644
index 000000000000..441c061c3ad0
--- /dev/null
+++ b/drivers/gpu/drm/sis/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y = -Iinclude/drm
+sis-y := sis_drv.o sis_mm.o
+
+obj-$(CONFIG_DRM_SIS) += sis.o
+
+
diff --git a/drivers/char/drm/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 7dacc64e9b56..7dacc64e9b56 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
diff --git a/drivers/char/drm/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index ef940bad63f7..ef940bad63f7 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
diff --git a/drivers/char/drm/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index b3878770fce1..b3878770fce1 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
diff --git a/drivers/gpu/drm/tdfx/Makefile b/drivers/gpu/drm/tdfx/Makefile
new file mode 100644
index 000000000000..0379f294b32a
--- /dev/null
+++ b/drivers/gpu/drm/tdfx/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+tdfx-y := tdfx_drv.o
+
+obj-$(CONFIG_DRM_TDFX) += tdfx.o
diff --git a/drivers/char/drm/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 012ff2e356b2..012ff2e356b2 100644
--- a/drivers/char/drm/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
diff --git a/drivers/char/drm/tdfx_drv.h b/drivers/gpu/drm/tdfx/tdfx_drv.h
index 84204ec1b046..84204ec1b046 100644
--- a/drivers/char/drm/tdfx_drv.h
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.h
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
new file mode 100644
index 000000000000..d59e258e2c13
--- /dev/null
+++ b/drivers/gpu/drm/via/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+via-y := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
+
+obj-$(CONFIG_DRM_VIA) +=via.o
diff --git a/drivers/char/drm/via_3d_reg.h b/drivers/gpu/drm/via/via_3d_reg.h
index 462375d543b9..462375d543b9 100644
--- a/drivers/char/drm/via_3d_reg.h
+++ b/drivers/gpu/drm/via/via_3d_reg.h
diff --git a/drivers/char/drm/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 7a339dba6a69..7a339dba6a69 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 409e00afdd07..409e00afdd07 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
index 7408a547a036..7408a547a036 100644
--- a/drivers/char/drm/via_dmablit.h
+++ b/drivers/gpu/drm/via/via_dmablit.h
diff --git a/drivers/char/drm/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 80c01cdfa37d..80c01cdfa37d 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
diff --git a/drivers/char/drm/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 2daae81874cd..2daae81874cd 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
diff --git a/drivers/char/drm/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index c6bb978a1106..c6bb978a1106 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
diff --git a/drivers/char/drm/via_map.c b/drivers/gpu/drm/via/via_map.c
index a967556be014..a967556be014 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
diff --git a/drivers/char/drm/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index e64094916e4f..e64094916e4f 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
diff --git a/drivers/char/drm/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 46a579198747..46a579198747 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
diff --git a/drivers/char/drm/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
index d6f8214b69f5..d6f8214b69f5 100644
--- a/drivers/char/drm/via_verifier.h
+++ b/drivers/gpu/drm/via/via_verifier.h
diff --git a/drivers/char/drm/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6ec04ac12459..6ec04ac12459 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 1ca6f4635eeb..2fde6c63f47d 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -30,6 +30,7 @@
 #include <linux/major.h>
 #include <linux/hid.h>
 #include <linux/mutex.h>
+#include <linux/smp_lock.h>
 
 #include <linux/hidraw.h>
 
@@ -157,6 +158,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
 	struct hidraw_list *list;
 	int err = 0;
 
+	lock_kernel();
 	if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) {
 		err = -ENOMEM;
 		goto out;
@@ -183,6 +185,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
 out_unlock:
 	spin_unlock(&minors_lock);
 out:
+	unlock_kernel();
 	return err;
 
 }
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index eb69fbadc9cb..dde6ce963a19 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -39,7 +39,6 @@
 #include <asm/io.h>
 #include <asm/arch/i2c.h>
 #include <asm/arch/pxa-regs.h>
-#include <asm/arch/pxa2xx-gpio.h>
 
 struct pxa_i2c {
 	spinlock_t		lock;
@@ -945,32 +944,6 @@ static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
 	.functionality	= i2c_pxa_functionality,
 };
 
-static void i2c_pxa_enable(struct platform_device *dev)
-{
-	if (cpu_is_pxa27x()) {
-		switch (dev->id) {
-		case 0:
-			pxa_gpio_mode(GPIO117_I2CSCL_MD);
-			pxa_gpio_mode(GPIO118_I2CSDA_MD);
-			break;
-		case 1:
-			local_irq_disable();
-			PCFR |= PCFR_PI2CEN;
-			local_irq_enable();
-			break;
-		}
-	}
-}
-
-static void i2c_pxa_disable(struct platform_device *dev)
-{
-	if (cpu_is_pxa27x() && dev->id == 1) {
-		local_irq_disable();
-		PCFR &= ~PCFR_PI2CEN;
-		local_irq_enable();
-	}
-}
-
 #define res_len(r)		((r)->end - (r)->start + 1)
 static int i2c_pxa_probe(struct platform_device *dev)
 {
@@ -1036,7 +1009,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
 #endif
 
 	clk_enable(i2c->clk);
-	i2c_pxa_enable(dev);
 
 	if (plat) {
 		i2c->adap.class = plat->class;
@@ -1080,7 +1052,6 @@ eadapt:
 	free_irq(irq, i2c);
 ereqirq:
 	clk_disable(i2c->clk);
-	i2c_pxa_disable(dev);
 	iounmap(i2c->reg_base);
 eremap:
 	clk_put(i2c->clk);
@@ -1103,7 +1074,6 @@ static int __exit i2c_pxa_remove(struct platform_device *dev)
 
 	clk_disable(i2c->clk);
 	clk_put(i2c->clk);
-	i2c_pxa_disable(dev);
 
 	iounmap(i2c->reg_base);
 	release_mem_region(i2c->iobase, i2c->iosize);
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index b1b45dddb17e..03a33f1b9cd3 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -72,7 +72,7 @@ struct isp1301 {
 };
 
 
-/* bits in OTG_CTRL_REG */
+/* bits in OTG_CTRL */
 
 #define OTG_XCEIV_OUTPUTS \
 	(OTG_ASESSVLD|OTG_BSESSEND|OTG_BSESSVLD|OTG_VBUSVLD|OTG_ID)
@@ -186,8 +186,8 @@ isp1301_clear_bits(struct isp1301 *isp, u8 reg, u8 bits)
 
 /* operational registers */
 #define	ISP1301_MODE_CONTROL_1	0x04	/* u8 read, set, +1 clear */
-#	define	MC1_SPEED_REG		(1 << 0)
-#	define	MC1_SUSPEND_REG		(1 << 1)
+#	define	MC1_SPEED		(1 << 0)
+#	define	MC1_SUSPEND		(1 << 1)
 #	define	MC1_DAT_SE0		(1 << 2)
 #	define	MC1_TRANSPARENT		(1 << 3)
 #	define	MC1_BDIS_ACON_EN	(1 << 4)
@@ -274,7 +274,7 @@ static void power_down(struct isp1301 *isp)
 	isp->otg.state = OTG_STATE_UNDEFINED;
 
 	// isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
-	isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND_REG);
+	isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND);
 
 	isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_ID_PULLDOWN);
 	isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
@@ -283,7 +283,7 @@ static void power_down(struct isp1301 *isp)
 static void power_up(struct isp1301 *isp)
 {
 	// isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
-	isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND_REG);
+	isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND);
 
 	/* do this only when cpu is driving transceiver,
 	 * so host won't see a low speed device...
@@ -360,6 +360,8 @@ isp1301_defer_work(struct isp1301 *isp, int work)
 /* called from irq handlers */
 static void a_idle(struct isp1301 *isp, const char *tag)
 {
+	u32 l;
+
 	if (isp->otg.state == OTG_STATE_A_IDLE)
 		return;
 
@@ -373,13 +375,17 @@ static void a_idle(struct isp1301 *isp, const char *tag)
 		gadget_suspend(isp);
 	}
 	isp->otg.state = OTG_STATE_A_IDLE;
-	isp->last_otg_ctrl = OTG_CTRL_REG = OTG_CTRL_REG & OTG_XCEIV_OUTPUTS;
+	l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
+	omap_writel(l, OTG_CTRL);
+	isp->last_otg_ctrl = l;
 	pr_debug("  --> %s/%s\n", state_name(isp), tag);
 }
 
 /* called from irq handlers */
 static void b_idle(struct isp1301 *isp, const char *tag)
 {
+	u32 l;
+
 	if (isp->otg.state == OTG_STATE_B_IDLE)
 		return;
 
@@ -393,7 +399,9 @@ static void b_idle(struct isp1301 *isp, const char *tag)
 		gadget_suspend(isp);
 	}
 	isp->otg.state = OTG_STATE_B_IDLE;
-	isp->last_otg_ctrl = OTG_CTRL_REG = OTG_CTRL_REG & OTG_XCEIV_OUTPUTS;
+	l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
+	omap_writel(l, OTG_CTRL);
+	isp->last_otg_ctrl = l;
 	pr_debug("  --> %s/%s\n", state_name(isp), tag);
 }
 
@@ -406,7 +414,7 @@ dump_regs(struct isp1301 *isp, const char *label)
 	u8	src = isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE);
 
 	pr_debug("otg: %06x, %s %s, otg/%02x stat/%02x.%02x\n",
-		OTG_CTRL_REG, label, state_name(isp),
+		omap_readl(OTG_CTRL), label, state_name(isp),
 		ctrl, status, src);
 	/* mode control and irq enables don't change much */
 #endif
@@ -429,7 +437,7 @@ dump_regs(struct isp1301 *isp, const char *label)
 static void check_state(struct isp1301 *isp, const char *tag)
 {
 	enum usb_otg_state	state = OTG_STATE_UNDEFINED;
-	u8			fsm = OTG_TEST_REG & 0x0ff;
+	u8			fsm = omap_readw(OTG_TEST) & 0x0ff;
 	unsigned		extra = 0;
 
 	switch (fsm) {
@@ -494,7 +502,8 @@ static void check_state(struct isp1301 *isp, const char *tag)
 	if (isp->otg.state == state && !extra)
 		return;
 	pr_debug("otg: %s FSM %s/%02x, %s, %06x\n", tag,
-		state_string(state), fsm, state_name(isp), OTG_CTRL_REG);
+		state_string(state), fsm, state_name(isp),
+		omap_readl(OTG_CTRL));
 }
 
 #else
@@ -508,10 +517,11 @@ static void update_otg1(struct isp1301 *isp, u8 int_src)
 {
 	u32 otg_ctrl;
 
-	otg_ctrl = OTG_CTRL_REG
-			& OTG_CTRL_MASK
-			& ~OTG_XCEIV_INPUTS
-			& ~(OTG_ID|OTG_ASESSVLD|OTG_VBUSVLD);
+	otg_ctrl = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
+	otg_ctrl &= ~OTG_XCEIV_INPUTS;
+	otg_ctrl &= ~(OTG_ID|OTG_ASESSVLD|OTG_VBUSVLD);
+
+
 	if (int_src & INTR_SESS_VLD)
 		otg_ctrl |= OTG_ASESSVLD;
 	else if (isp->otg.state == OTG_STATE_A_WAIT_VFALL) {
@@ -534,7 +544,7 @@ static void update_otg1(struct isp1301 *isp, u8 int_src)
 			return;
 		}
 	}
-	OTG_CTRL_REG = otg_ctrl;
+	omap_writel(otg_ctrl, OTG_CTRL);
 }
 
 /* outputs from ISP1301_OTG_STATUS */
@@ -542,15 +552,14 @@ static void update_otg2(struct isp1301 *isp, u8 otg_status)
 {
 	u32 otg_ctrl;
 
-	otg_ctrl = OTG_CTRL_REG
-			& OTG_CTRL_MASK
-			& ~OTG_XCEIV_INPUTS
-			& ~(OTG_BSESSVLD|OTG_BSESSEND);
+	otg_ctrl = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
+	otg_ctrl &= ~OTG_XCEIV_INPUTS;
+	otg_ctrl &= ~(OTG_BSESSVLD | OTG_BSESSEND);
 	if (otg_status & OTG_B_SESS_VLD)
 		otg_ctrl |= OTG_BSESSVLD;
 	else if (otg_status & OTG_B_SESS_END)
 		otg_ctrl |= OTG_BSESSEND;
-	OTG_CTRL_REG = otg_ctrl;
+	omap_writel(otg_ctrl, OTG_CTRL);
 }
 
 /* inputs going to ISP1301 */
@@ -559,7 +568,7 @@ static void otg_update_isp(struct isp1301 *isp)
 	u32 otg_ctrl, otg_change;
 	u8 set = OTG1_DM_PULLDOWN, clr = OTG1_DM_PULLUP;
 
-	otg_ctrl = OTG_CTRL_REG;
+	otg_ctrl = omap_readl(OTG_CTRL);
 	otg_change = otg_ctrl ^ isp->last_otg_ctrl;
 	isp->last_otg_ctrl = otg_ctrl;
 	otg_ctrl = otg_ctrl & OTG_XCEIV_INPUTS;
@@ -639,6 +648,8 @@ pulldown:
 
 	/* HNP switch to host or peripheral; and SRP */
 	if (otg_change & OTG_PULLUP) {
+		u32 l;
+
 		switch (isp->otg.state) {
 		case OTG_STATE_B_IDLE:
 			if (clr & OTG1_DP_PULLUP)
@@ -655,7 +666,9 @@ pulldown:
 		default:
 			break;
 		}
-		OTG_CTRL_REG |= OTG_PULLUP;
+		l = omap_readl(OTG_CTRL);
+		l |= OTG_PULLUP;
+		omap_writel(l, OTG_CTRL);
 	}
 
 	check_state(isp, __func__);
@@ -664,20 +677,20 @@ pulldown:
 
 static irqreturn_t omap_otg_irq(int irq, void *_isp)
 {
-	u16		otg_irq = OTG_IRQ_SRC_REG;
+	u16		otg_irq = omap_readw(OTG_IRQ_SRC);
 	u32		otg_ctrl;
 	int		ret = IRQ_NONE;
 	struct isp1301	*isp = _isp;
 
 	/* update ISP1301 transciever from OTG controller */
 	if (otg_irq & OPRT_CHG) {
-		OTG_IRQ_SRC_REG = OPRT_CHG;
+		omap_writew(OPRT_CHG, OTG_IRQ_SRC);
 		isp1301_defer_work(isp, WORK_UPDATE_ISP);
 		ret = IRQ_HANDLED;
 
 	/* SRP to become b_peripheral failed */
 	} else if (otg_irq & B_SRP_TMROUT) {
-		pr_debug("otg: B_SRP_TIMEOUT, %06x\n", OTG_CTRL_REG);
+		pr_debug("otg: B_SRP_TIMEOUT, %06x\n", omap_readl(OTG_CTRL));
 		notresponding(isp);
 
 		/* gadget drivers that care should monitor all kinds of
@@ -687,31 +700,31 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
 		if (isp->otg.state == OTG_STATE_B_SRP_INIT)
 			b_idle(isp, "srp_timeout");
 
-		OTG_IRQ_SRC_REG = B_SRP_TMROUT;
+		omap_writew(B_SRP_TMROUT, OTG_IRQ_SRC);
 		ret = IRQ_HANDLED;
 
 	/* HNP to become b_host failed */
 	} else if (otg_irq & B_HNP_FAIL) {
 		pr_debug("otg: %s B_HNP_FAIL, %06x\n",
-				state_name(isp), OTG_CTRL_REG);
+				state_name(isp), omap_readl(OTG_CTRL));
 		notresponding(isp);
 
-		otg_ctrl = OTG_CTRL_REG;
+		otg_ctrl = omap_readl(OTG_CTRL);
 		otg_ctrl |= OTG_BUSDROP;
 		otg_ctrl &= OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
-		OTG_CTRL_REG = otg_ctrl;
+		omap_writel(otg_ctrl, OTG_CTRL);
 
 		/* subset of b_peripheral()... */
 		isp->otg.state = OTG_STATE_B_PERIPHERAL;
 		pr_debug("  --> b_peripheral\n");
 
-		OTG_IRQ_SRC_REG = B_HNP_FAIL;
+		omap_writew(B_HNP_FAIL, OTG_IRQ_SRC);
 		ret = IRQ_HANDLED;
 
 	/* detect SRP from B-device ... */
 	} else if (otg_irq & A_SRP_DETECT) {
 		pr_debug("otg: %s SRP_DETECT, %06x\n",
-				state_name(isp), OTG_CTRL_REG);
+				state_name(isp), omap_readl(OTG_CTRL));
 
 		isp1301_defer_work(isp, WORK_UPDATE_OTG);
 		switch (isp->otg.state) {
@@ -719,49 +732,49 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
 			if (!isp->otg.host)
 				break;
 			isp1301_defer_work(isp, WORK_HOST_RESUME);
-			otg_ctrl = OTG_CTRL_REG;
+			otg_ctrl = omap_readl(OTG_CTRL);
 			otg_ctrl |= OTG_A_BUSREQ;
 			otg_ctrl &= ~(OTG_BUSDROP|OTG_B_BUSREQ)
 					& ~OTG_XCEIV_INPUTS
 					& OTG_CTRL_MASK;
-			OTG_CTRL_REG = otg_ctrl;
+			omap_writel(otg_ctrl, OTG_CTRL);
 			break;
 		default:
 			break;
 		}
 
-		OTG_IRQ_SRC_REG = A_SRP_DETECT;
+		omap_writew(A_SRP_DETECT, OTG_IRQ_SRC);
 		ret = IRQ_HANDLED;
 
 	/* timer expired: T(a_wait_bcon) and maybe T(a_wait_vrise)
 	 * we don't track them separately
 	 */
 	} else if (otg_irq & A_REQ_TMROUT) {
-		otg_ctrl = OTG_CTRL_REG;
+		otg_ctrl = omap_readl(OTG_CTRL);
 		pr_info("otg: BCON_TMOUT from %s, %06x\n",
 				state_name(isp), otg_ctrl);
 		notresponding(isp);
 
 		otg_ctrl |= OTG_BUSDROP;
 		otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
-		OTG_CTRL_REG = otg_ctrl;
+		omap_writel(otg_ctrl, OTG_CTRL);
 		isp->otg.state = OTG_STATE_A_WAIT_VFALL;
 
-		OTG_IRQ_SRC_REG = A_REQ_TMROUT;
+		omap_writew(A_REQ_TMROUT, OTG_IRQ_SRC);
 		ret = IRQ_HANDLED;
 
 	/* A-supplied voltage fell too low; overcurrent */
 	} else if (otg_irq & A_VBUS_ERR) {
-		otg_ctrl = OTG_CTRL_REG;
+		otg_ctrl = omap_readl(OTG_CTRL);
 		printk(KERN_ERR "otg: %s, VBUS_ERR %04x ctrl %06x\n",
 			state_name(isp), otg_irq, otg_ctrl);
 
 		otg_ctrl |= OTG_BUSDROP;
 		otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
-		OTG_CTRL_REG = otg_ctrl;
+		omap_writel(otg_ctrl, OTG_CTRL);
 		isp->otg.state = OTG_STATE_A_VBUS_ERR;
 
-		OTG_IRQ_SRC_REG = A_VBUS_ERR;
+		omap_writew(A_VBUS_ERR, OTG_IRQ_SRC);
 		ret = IRQ_HANDLED;
 
 	/* switch driver; the transciever code activates it,
@@ -770,7 +783,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
 	} else if (otg_irq & DRIVER_SWITCH) {
 		int	kick = 0;
 
-		otg_ctrl = OTG_CTRL_REG;
+		otg_ctrl = omap_readl(OTG_CTRL);
 		printk(KERN_NOTICE "otg: %s, SWITCH to %s, ctrl %06x\n",
 				state_name(isp),
 				(otg_ctrl & OTG_DRIVER_SEL)
@@ -793,7 +806,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
 		} else {
 			if (!(otg_ctrl & OTG_ID)) {
 				otg_ctrl &= OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
-				OTG_CTRL_REG = otg_ctrl | OTG_A_BUSREQ;
+				omap_writel(otg_ctrl | OTG_A_BUSREQ, OTG_CTRL);
 			}
 
 			if (isp->otg.host) {
@@ -818,7 +831,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
 			}
 		}
 
-		OTG_IRQ_SRC_REG = DRIVER_SWITCH;
+		omap_writew(DRIVER_SWITCH, OTG_IRQ_SRC);
 		ret = IRQ_HANDLED;
 
 		if (kick)
@@ -834,12 +847,15 @@ static struct platform_device *otg_dev;
 
 static int otg_init(struct isp1301 *isp)
 {
+	u32 l;
+
 	if (!otg_dev)
 		return -ENODEV;
 
 	dump_regs(isp, __func__);
 	/* some of these values are board-specific... */
-	OTG_SYSCON_2_REG |= OTG_EN
+	l = omap_readl(OTG_SYSCON_2);
+	l |= OTG_EN
 		/* for B-device: */
 		| SRP_GPDATA		/* 9msec Bdev D+ pulse */
 		| SRP_GPDVBUS		/* discharge after VBUS pulse */
@@ -849,18 +865,22 @@ static int otg_init(struct isp1301 *isp)
 		| SRP_DPW		/* detect 167+ns SRP pulses */
 		| SRP_DATA | SRP_VBUS	/* accept both kinds of SRP pulse */
 		;
+	omap_writel(l, OTG_SYSCON_2);
 
 	update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE));
 	update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS));
 
 	check_state(isp, __func__);
 	pr_debug("otg: %s, %s %06x\n",
-			state_name(isp), __func__, OTG_CTRL_REG);
+			state_name(isp), __func__, omap_readl(OTG_CTRL));
 
-	OTG_IRQ_EN_REG = DRIVER_SWITCH | OPRT_CHG
+	omap_writew(DRIVER_SWITCH | OPRT_CHG
 			| B_SRP_TMROUT | B_HNP_FAIL
-			| A_VBUS_ERR | A_SRP_DETECT | A_REQ_TMROUT;
-	OTG_SYSCON_2_REG |= OTG_EN;
+			| A_VBUS_ERR | A_SRP_DETECT | A_REQ_TMROUT, OTG_IRQ_EN);
+
+	l = omap_readl(OTG_SYSCON_2);
+	l |= OTG_EN;
+	omap_writel(l, OTG_SYSCON_2);
 
 	return 0;
 }
@@ -927,7 +947,11 @@ static void otg_unbind(struct isp1301 *isp)
 
 static void b_peripheral(struct isp1301 *isp)
 {
-	OTG_CTRL_REG = OTG_CTRL_REG & OTG_XCEIV_OUTPUTS;
+	u32 l;
+
+	l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
+	omap_writel(l, OTG_CTRL);
+
 	usb_gadget_vbus_connect(isp->otg.gadget);
 
 #ifdef CONFIG_USB_OTG
@@ -999,6 +1023,8 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
 			isp_bstat = 0;
 		}
 	} else {
+		u32 l;
+
 		/* if user unplugged mini-A end of cable,
 		 * don't bypass A_WAIT_VFALL.
 		 */
@@ -1019,8 +1045,9 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
 			isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1,
 						MC1_BDIS_ACON_EN);
 			isp->otg.state = OTG_STATE_B_IDLE;
-			OTG_CTRL_REG &= OTG_CTRL_REG & OTG_CTRL_MASK
-						& ~OTG_CTRL_BITS;
+			l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
+			l &= ~OTG_CTRL_BITS;
+			omap_writel(l, OTG_CTRL);
 			break;
 		case OTG_STATE_B_IDLE:
 			break;
@@ -1046,7 +1073,8 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
 			/* FALLTHROUGH */
 		case OTG_STATE_B_SRP_INIT:
 			b_idle(isp, __func__);
-			OTG_CTRL_REG &= OTG_CTRL_REG & OTG_XCEIV_OUTPUTS;
+			l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
+			omap_writel(l, OTG_CTRL);
 			/* FALLTHROUGH */
 		case OTG_STATE_B_IDLE:
 			if (isp->otg.gadget && (isp_bstat & OTG_B_SESS_VLD)) {
@@ -1130,11 +1158,11 @@ isp1301_work(struct work_struct *work)
 		case OTG_STATE_A_WAIT_VRISE:
 			isp->otg.state = OTG_STATE_A_HOST;
 			pr_debug("  --> a_host\n");
-			otg_ctrl = OTG_CTRL_REG;
+			otg_ctrl = omap_readl(OTG_CTRL);
 			otg_ctrl |= OTG_A_BUSREQ;
 			otg_ctrl &= ~(OTG_BUSDROP|OTG_B_BUSREQ)
 					& OTG_CTRL_MASK;
-			OTG_CTRL_REG = otg_ctrl;
+			omap_writel(otg_ctrl, OTG_CTRL);
 			break;
 		case OTG_STATE_B_WAIT_ACON:
 			isp->otg.state = OTG_STATE_B_HOST;
@@ -1274,7 +1302,7 @@ isp1301_set_host(struct otg_transceiver *otg, struct usb_bus *host)
 		return -ENODEV;
 
 	if (!host) {
-		OTG_IRQ_EN_REG = 0;
+		omap_writew(0, OTG_IRQ_EN);
 		power_down(isp);
 		isp->otg.host = 0;
 		return 0;
@@ -1325,12 +1353,13 @@ static int
 isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget)
 {
 	struct isp1301	*isp = container_of(otg, struct isp1301, otg);
+	u32 l;
 
 	if (!otg || isp != the_transceiver)
 		return -ENODEV;
 
 	if (!gadget) {
-		OTG_IRQ_EN_REG = 0;
+		omap_writew(0, OTG_IRQ_EN);
 		if (!isp->otg.default_a)
 			enable_vbus_draw(isp, 0);
 		usb_gadget_vbus_disconnect(isp->otg.gadget);
@@ -1351,9 +1380,11 @@ isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget)
 	isp->otg.gadget = gadget;
 	// FIXME update its refcount
 
-	OTG_CTRL_REG = (OTG_CTRL_REG & OTG_CTRL_MASK
-			& ~(OTG_XCEIV_OUTPUTS|OTG_CTRL_BITS))
-			| OTG_ID;
+	l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
+	l &= ~(OTG_XCEIV_OUTPUTS|OTG_CTRL_BITS);
+	l |= OTG_ID;
+	omap_writel(l, OTG_CTRL);
+
 	power_up(isp);
 	isp->otg.state = OTG_STATE_B_IDLE;
 
@@ -1405,16 +1436,17 @@ isp1301_start_srp(struct otg_transceiver *dev)
 			|| isp->otg.state != OTG_STATE_B_IDLE)
 		return -ENODEV;
 
-	otg_ctrl = OTG_CTRL_REG;
+	otg_ctrl = omap_readl(OTG_CTRL);
 	if (!(otg_ctrl & OTG_BSESSEND))
 		return -EINVAL;
 
 	otg_ctrl |= OTG_B_BUSREQ;
 	otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK;
-	OTG_CTRL_REG = otg_ctrl;
+	omap_writel(otg_ctrl, OTG_CTRL);
 	isp->otg.state = OTG_STATE_B_SRP_INIT;
 
-	pr_debug("otg: SRP, %s ... %06x\n", state_name(isp), OTG_CTRL_REG);
+	pr_debug("otg: SRP, %s ... %06x\n", state_name(isp),
+			omap_readl(OTG_CTRL));
 #ifdef CONFIG_USB_OTG
 	check_state(isp, __func__);
 #endif
@@ -1426,6 +1458,7 @@ isp1301_start_hnp(struct otg_transceiver *dev)
 {
 #ifdef CONFIG_USB_OTG
 	struct isp1301	*isp = container_of(dev, struct isp1301, otg);
+	u32 l;
 
 	if (!dev || isp != the_transceiver)
 		return -ENODEV;
@@ -1452,7 +1485,9 @@ isp1301_start_hnp(struct otg_transceiver *dev)
 #endif
 		/* caller must suspend then clear A_BUSREQ */
 		usb_gadget_vbus_connect(isp->otg.gadget);
-		OTG_CTRL_REG |= OTG_A_SETB_HNPEN;
+		l = omap_readl(OTG_CTRL);
+		l |= OTG_A_SETB_HNPEN;
+		omap_writel(l, OTG_CTRL);
 
 		break;
 	case OTG_STATE_A_PERIPHERAL:
@@ -1462,7 +1497,7 @@ isp1301_start_hnp(struct otg_transceiver *dev)
 		return -EILSEQ;
 	}
 	pr_debug("otg: HNP %s, %06x ...\n",
-		state_name(isp), OTG_CTRL_REG);
+		state_name(isp), omap_readl(OTG_CTRL));
 	check_state(isp, __func__);
 	return 0;
 #else
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index d34c14c81c29..006a5857256a 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -34,6 +34,7 @@
 #include <linux/list.h>
 #include <linux/i2c.h>
 #include <linux/i2c-dev.h>
+#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 
 static struct i2c_driver i2cdev_driver;
@@ -441,14 +442,20 @@ static int i2cdev_open(struct inode *inode, struct file *file)
 	struct i2c_client *client;
 	struct i2c_adapter *adap;
 	struct i2c_dev *i2c_dev;
+	int ret = 0;
 
+	lock_kernel();
 	i2c_dev = i2c_dev_get_by_minor(minor);
-	if (!i2c_dev)
-		return -ENODEV;
+	if (!i2c_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	adap = i2c_get_adapter(i2c_dev->adap->nr);
-	if (!adap)
-		return -ENODEV;
+	if (!adap) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	/* This creates an anonymous i2c_client, which may later be
 	 * pointed to some address using I2C_SLAVE or I2C_SLAVE_FORCE.
@@ -460,7 +467,8 @@ static int i2cdev_open(struct inode *inode, struct file *file)
 	client = kzalloc(sizeof(*client), GFP_KERNEL);
 	if (!client) {
 		i2c_put_adapter(adap);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 	snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr);
 	client->driver = &i2cdev_driver;
@@ -468,7 +476,9 @@ static int i2cdev_open(struct inode *inode, struct file *file)
 	client->adapter = adap;
 	file->private_data = client;
 
-	return 0;
+out:
+	unlock_kernel();
+	return ret;
 }
 
 static int i2cdev_release(struct inode *inode, struct file *file)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 1e1f26331a24..a3d228302d20 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2421,9 +2421,12 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
 	if (i >= MAX_HWIFS * MAX_DRIVES)
 		return -ENXIO;
 
+	lock_kernel();
 	tape = ide_tape_chrdev_get(i);
-	if (!tape)
+	if (!tape) {
+		unlock_kernel();
 		return -ENXIO;
+	}
 
 	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
 
@@ -2482,10 +2485,12 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
 			}
 		}
 	}
+	unlock_kernel();
 	return 0;
 
 out_put_tape:
 	ide_tape_put(tape);
+	unlock_kernel();
 	return retval;
 }
 
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 3381424d70a1..8dbf4d9b6447 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -63,11 +63,11 @@ MODULE_LICENSE("Dual MPL/GPL");
 
 #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
 
-#ifdef PCMCIA_DEBUG
-INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#ifdef CONFIG_PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, 0);
 #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
-static char *version =
-"ide-cs.c 1.3 2002/10/26 05:45:31 (David Hinds)";
+/*static char *version =
+"ide-cs.c 1.3 2002/10/26 05:45:31 (David Hinds)";*/
 #else
 #define DEBUG(n, args...)
 #endif
@@ -375,7 +375,7 @@ failed:
 
 ======================================================================*/
 
-void ide_release(struct pcmcia_device *link)
+static void ide_release(struct pcmcia_device *link)
 {
     ide_info_t *info = link->priv;
     ide_hwif_t *hwif = info->hwif;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index d7a6881b571d..b25675faaaf5 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -45,6 +45,7 @@
 #include <linux/cdev.h>
 #include <linux/idr.h>
 #include <linux/mutex.h>
+#include <linux/smp_lock.h>
 
 #include <asm/uaccess.h>
 
@@ -1159,6 +1160,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
 {
 	struct ib_ucm_file *file;
 
+	cycle_kernel_lock();
 	file = kmalloc(sizeof(*file), GFP_KERNEL);
 	if (!file)
 		return -ENOMEM;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ca4cf3a511ab..195f97302fe5 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -38,6 +38,7 @@
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <linux/miscdevice.h>
+#include <linux/smp_lock.h>
 
 #include <rdma/rdma_user_cm.h>
 #include <rdma/ib_marshall.h>
@@ -1156,6 +1157,7 @@ static int ucma_open(struct inode *inode, struct file *filp)
 	if (!file)
 		return -ENOMEM;
 
+	lock_kernel();
 	INIT_LIST_HEAD(&file->event_list);
 	INIT_LIST_HEAD(&file->ctx_list);
 	init_waitqueue_head(&file->poll_wait);
@@ -1163,6 +1165,7 @@ static int ucma_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = file;
 	file->filp = filp;
+	unlock_kernel();
 	return 0;
 }
 
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 840ede9ae965..208c7f34323c 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -777,6 +777,19 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
 }
 #endif
 
+/*
+ * ib_umad_open() does not need the BKL:
+ *
+ *  - umad_port[] accesses are protected by port_lock, the
+ *    ib_umad_port structures are properly reference counted, and
+ *    everything else is purely local to the file being created, so
+ *    races against other open calls are not a problem;
+ *  - the ioctl method does not affect any global state outside of the
+ *    file structure being operated on;
+ *  - the port is added to umad_port[] as the last part of module
+ *    initialization so the open method will either immediately run
+ *    -ENXIO, or all required initialization will be done.
+ */
 static int ib_umad_open(struct inode *inode, struct file *filp)
 {
 	struct ib_umad_port *port;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index caed42bf7ef5..0f34858e31e7 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -610,6 +610,18 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
 	return file->device->ib_dev->mmap(file->ucontext, vma);
 }
 
+/*
+ * ib_uverbs_open() does not need the BKL:
+ *
+ *  - dev_table[] accesses are protected by map_lock, the
+ *    ib_uverbs_device structures are properly reference counted, and
+ *    everything else is purely local to the file being created, so
+ *    races against other open calls are not a problem;
+ *  - there is no ioctl method to race against;
+ *  - the device is added to dev_table[] as the last part of module
+ *    initialization, the open method will either immediately run
+ *    -ENXIO, or all required initialization will be done.
+ */
 static int ib_uverbs_open(struct inode *inode, struct file *filp)
 {
 	struct ib_uverbs_device *dev;
@@ -651,7 +663,6 @@ err_module:
 
 err:
 	kref_put(&dev->ref, ib_uverbs_release_dev);
-
 	return ret;
 }
 
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index b472b15637f0..35f301c88b57 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -39,6 +39,7 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
+#include <linux/smp_lock.h>
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
@@ -1815,6 +1816,7 @@ done:
 static int ipath_open(struct inode *in, struct file *fp)
 {
 	/* The real work is performed later in ipath_assign_port() */
+	cycle_kernel_lock();
 	fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
 	return fp->private_data ? 0 : -ENOMEM;
 }
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 27006fc18305..408df0bd6be5 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -21,6 +21,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
+#include <linux/smp_lock.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
 MODULE_DESCRIPTION("Input core");
@@ -1588,13 +1589,17 @@ EXPORT_SYMBOL(input_unregister_handle);
 
 static int input_open_file(struct inode *inode, struct file *file)
 {
-	struct input_handler *handler = input_table[iminor(inode) >> 5];
+	struct input_handler *handler;
 	const struct file_operations *old_fops, *new_fops = NULL;
 	int err;
 
+	lock_kernel();
 	/* No load-on-demand here? */
-	if (!handler || !(new_fops = fops_get(handler->fops)))
-		return -ENODEV;
+	handler = input_table[iminor(inode) >> 5];
+	if (!handler || !(new_fops = fops_get(handler->fops))) {
+		err = -ENODEV;
+		goto out;
+	}
 
 	/*
 	 * That's _really_ odd. Usually NULL ->open means "nothing special",
@@ -1602,7 +1607,8 @@ static int input_open_file(struct inode *inode, struct file *file)
 	 */
 	if (!new_fops->open) {
 		fops_put(new_fops);
-		return -ENODEV;
+		err = -ENODEV;
+		goto out;
 	}
 	old_fops = file->f_op;
 	file->f_op = new_fops;
@@ -1614,6 +1620,8 @@ static int input_open_file(struct inode *inode, struct file *file)
 		file->f_op = fops_get(old_fops);
 	}
 	fops_put(old_fops);
+out:
+	unlock_kernel();
 	return err;
 }
 
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 45e5d05b01de..49d8abfe38fe 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -35,6 +35,7 @@
 
 #include <linux/hp_sdc.h>
 #include <linux/errno.h>
+#include <linux/smp_lock.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -408,6 +409,7 @@ static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait)
 
 static int hp_sdc_rtc_open(struct inode *inode, struct file *file)
 {
+	cycle_kernel_lock();
 	return 0;
 }
 
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index a56ad4ba8fe2..2bcfa0b35061 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -37,6 +37,7 @@
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
 #include <linux/uinput.h>
+#include <linux/smp_lock.h>
 
 static int uinput_dev_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
 {
@@ -222,6 +223,7 @@ static int uinput_open(struct inode *inode, struct file *file)
 	if (!newdev)
 		return -ENOMEM;
 
+	lock_kernel();
 	mutex_init(&newdev->mutex);
 	spin_lock_init(&newdev->requests_lock);
 	init_waitqueue_head(&newdev->requests_waitq);
@@ -229,6 +231,7 @@ static int uinput_open(struct inode *inode, struct file *file)
 	newdev->state = UIST_NEW_DEVICE;
 
 	file->private_data = newdev;
+	unlock_kernel();
 
 	return 0;
 }
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index b989748598ae..8137e50ded87 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -14,6 +14,7 @@
 #define MOUSEDEV_MIX		31
 
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -545,16 +546,21 @@ static int mousedev_open(struct inode *inode, struct file *file)
 	if (i >= MOUSEDEV_MINORS)
 		return -ENODEV;
 
+	lock_kernel();
 	error = mutex_lock_interruptible(&mousedev_table_mutex);
-	if (error)
+	if (error) {
+		unlock_kernel();
 		return error;
+	}
 	mousedev = mousedev_table[i];
 	if (mousedev)
 		get_device(&mousedev->dev);
 	mutex_unlock(&mousedev_table_mutex);
 
-	if (!mousedev)
+	if (!mousedev) {
+		unlock_kernel();
 		return -ENODEV;
+	}
 
 	client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL);
 	if (!client) {
@@ -573,6 +579,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
 		goto err_free_client;
 
 	file->private_data = client;
+	unlock_kernel();
 	return 0;
 
 err_free_client:
@@ -580,6 +587,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
 	kfree(client);
 err_put_mousedev:
 	put_device(&mousedev->dev);
+	unlock_kernel();
 	return error;
 }
 
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 0403622ae267..c9397c8ee97e 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/serio.h>
@@ -81,9 +82,10 @@ static int serio_raw_open(struct inode *inode, struct file *file)
 	struct serio_raw_list *list;
 	int retval = 0;
 
+	lock_kernel();
 	retval = mutex_lock_interruptible(&serio_raw_mutex);
 	if (retval)
-		return retval;
+		goto out_bkl;
 
 	if (!(serio_raw = serio_raw_locate(iminor(inode)))) {
 		retval = -ENODEV;
@@ -108,6 +110,8 @@ static int serio_raw_open(struct inode *inode, struct file *file)
 
 out:
 	mutex_unlock(&serio_raw_mutex);
+out_bkl:
+	unlock_kernel();
 	return retval;
 }
 
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index 0f47f4697cdf..9ce3b3baf3a2 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -66,6 +66,9 @@ static irqreturn_t input_handler(int rq, void *dev_id)
 	case XENKBD_TYPE_MOTION:
 		input_report_rel(dev, REL_X, event->motion.rel_x);
 		input_report_rel(dev, REL_Y, event->motion.rel_y);
+		if (event->motion.rel_z)
+			input_report_rel(dev, REL_WHEEL,
+					 -event->motion.rel_z);
 		break;
 	case XENKBD_TYPE_KEY:
 		dev = NULL;
@@ -84,6 +87,9 @@ static irqreturn_t input_handler(int rq, void *dev_id)
 	case XENKBD_TYPE_POS:
 		input_report_abs(dev, ABS_X, event->pos.abs_x);
 		input_report_abs(dev, ABS_Y, event->pos.abs_y);
+		if (event->pos.rel_z)
+			input_report_rel(dev, REL_WHEEL,
+					 -event->pos.rel_z);
 		break;
 	}
 	if (dev)
@@ -152,7 +158,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
 	ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
 	for (i = BTN_LEFT; i <= BTN_TASK; i++)
 		set_bit(i, ptr->keybit);
-	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
+	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
 	input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
 	input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
 
@@ -294,6 +300,16 @@ InitWait:
 		 */
 		if (dev->state != XenbusStateConnected)
 			goto InitWait; /* no InitWait seen yet, fudge it */
+
+		/* Set input abs params to match backend screen res */
+		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+				 "width", "%d", &val) > 0)
+			input_set_abs_params(info->ptr, ABS_X, 0, val, 0, 0);
+
+		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+				 "height", "%d", &val) > 0)
+			input_set_abs_params(info->ptr, ABS_Y, 0, val, 0, 0);
+
 		break;
 
 	case XenbusStateClosing:
@@ -337,4 +353,6 @@ static void __exit xenkbd_cleanup(void)
337module_init(xenkbd_init); 353module_init(xenkbd_init);
338module_exit(xenkbd_cleanup); 354module_exit(xenkbd_cleanup);
339 355
356MODULE_DESCRIPTION("Xen virtual keyboard/pointer device frontend");
340MODULE_LICENSE("GPL"); 357MODULE_LICENSE("GPL");
358MODULE_ALIAS("xen:vkbd");
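A note on the xen-kbdfront change above: reporting REL_WHEEL only has an effect because the capability is also declared before input_register_device(); the input core ignores event codes a device never advertised. A small stand-alone sketch of that pairing — dev and rel_z are hypothetical, not taken from this patch:

#include <linux/bitops.h>
#include <linux/input.h>

/* Setup side: the wheel must be advertised or the report below is dropped. */
static void setup_wheel_caps(struct input_dev *dev)
{
	dev->evbit[0]  |= BIT(EV_REL);
	dev->relbit[0] |= BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
}

/* Event side: the backend's positive rel_z (wheel down) maps to a negative
 * REL_WHEEL value, hence the negation, as in the hunk above. */
static void report_wheel(struct input_dev *dev, int rel_z)
{
	if (rel_z)
		input_report_rel(dev, REL_WHEEL, -rel_z);
	input_sync(dev);
}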
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6ca0bb949ad3..2095153582f1 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -20,6 +20,7 @@
20#include <linux/signal.h> 20#include <linux/signal.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/smp_lock.h>
23#include <linux/timer.h> 24#include <linux/timer.h>
24#include <linux/wait.h> 25#include <linux/wait.h>
25#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE 26#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
@@ -983,13 +984,17 @@ capi_ioctl(struct inode *inode, struct file *file,
983static int 984static int
984capi_open(struct inode *inode, struct file *file) 985capi_open(struct inode *inode, struct file *file)
985{ 986{
987 int ret;
988
989 lock_kernel();
986 if (file->private_data) 990 if (file->private_data)
987 return -EEXIST; 991 ret = -EEXIST;
988 992 else if ((file->private_data = capidev_alloc()) == NULL)
989 if ((file->private_data = capidev_alloc()) == NULL) 993 ret = -ENOMEM;
990 return -ENOMEM; 994 else
991 995 ret = nonseekable_open(inode, file);
992 return nonseekable_open(inode, file); 996 unlock_kernel();
997 return ret;
993} 998}
994 999
995static int 1000static int
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index c90928974249..1e85f743214e 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/poll.h> 16#include <linux/poll.h>
17#include <linux/smp_lock.h>
17#include <asm/uaccess.h> 18#include <asm/uaccess.h>
18 19
19#include "platform.h" 20#include "platform.h"
@@ -127,14 +128,19 @@ static unsigned int maint_poll(struct file *file, poll_table * wait)
127 128
128static int maint_open(struct inode *ino, struct file *filep) 129static int maint_open(struct inode *ino, struct file *filep)
129{ 130{
131 int ret;
132
133 lock_kernel();
130 /* only one open is allowed, so we test 134 /* only one open is allowed, so we test
131 it atomically */ 135 it atomically */
132 if (test_and_set_bit(0, &opened)) 136 if (test_and_set_bit(0, &opened))
133 return (-EBUSY); 137 ret = -EBUSY;
134 138 else {
135 filep->private_data = NULL; 139 filep->private_data = NULL;
136 140 ret = nonseekable_open(ino, filep);
137 return nonseekable_open(ino, filep); 141 }
142 unlock_kernel();
143 return ret;
138} 144}
139 145
140static int maint_close(struct inode *ino, struct file *filep) 146static int maint_close(struct inode *ino, struct file *filep)
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 78f141e77466..f4969fe0a055 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -17,6 +17,7 @@
17#include <linux/poll.h> 17#include <linux/poll.h>
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19#include <linux/skbuff.h> 19#include <linux/skbuff.h>
20#include <linux/smp_lock.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21 22
22#include "platform.h" 23#include "platform.h"
@@ -400,6 +401,7 @@ static unsigned int um_idi_poll(struct file *file, poll_table * wait)
400 401
401static int um_idi_open(struct inode *inode, struct file *file) 402static int um_idi_open(struct inode *inode, struct file *file)
402{ 403{
404 cycle_kernel_lock();
403 return (0); 405 return (0);
404} 406}
405 407
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 16a874bb1561..fbbcb27fb681 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -21,6 +21,7 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/poll.h> 22#include <linux/poll.h>
23#include <linux/kmod.h> 23#include <linux/kmod.h>
24#include <linux/smp_lock.h>
24 25
25#include "platform.h" 26#include "platform.h"
26#undef ID_MASK 27#undef ID_MASK
@@ -580,6 +581,7 @@ xdi_copy_from_user(void *os_handle, void *dst, const void __user *src, int lengt
580 */ 581 */
581static int divas_open(struct inode *inode, struct file *file) 582static int divas_open(struct inode *inode, struct file *file)
582{ 583{
584 cycle_kernel_lock();
583 return (0); 585 return (0);
584} 586}
585 587
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 8d8c6b736167..7188c59a76ff 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1732,7 +1732,7 @@ isdn_open(struct inode *ino, struct file *filep)
1732 int chidx; 1732 int chidx;
1733 int retval = -ENODEV; 1733 int retval = -ENODEV;
1734 1734
1735 1735 lock_kernel();
1736 if (minor == ISDN_MINOR_STATUS) { 1736 if (minor == ISDN_MINOR_STATUS) {
1737 infostruct *p; 1737 infostruct *p;
1738 1738
@@ -1783,6 +1783,7 @@ isdn_open(struct inode *ino, struct file *filep)
1783#endif 1783#endif
1784 out: 1784 out:
1785 nonseekable_open(ino, filep); 1785 nonseekable_open(ino, filep);
1786 unlock_kernel();
1786 return retval; 1787 return retval;
1787} 1788}
1788 1789
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 6b8dbb9ba73b..76f2b36881c3 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
1config LGUEST 1config LGUEST
2 tristate "Linux hypervisor example code" 2 tristate "Linux hypervisor example code"
3 depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !(X86_VISWS || X86_VOYAGER) 3 depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !X86_VOYAGER
4 select HVC_DRIVER 4 select HVC_DRIVER
5 ---help--- 5 ---help---
6 This is a very simple module which allows you to run 6 This is a very simple module which allows you to run
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 005bd045d2eb..5faefeaf6790 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -136,7 +136,6 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
136 * first step in the migration to the kernel types. pte_pfn is already defined 136 * first step in the migration to the kernel types. pte_pfn is already defined
137 * in the kernel. */ 137 * in the kernel. */
138#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) 138#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
139#define pte_flags(x) (pte_val(x) & ~PAGE_MASK)
140#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) 139#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
141 140
142/* interrupts_and_traps.c: */ 141/* interrupts_and_traps.c: */
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 61b62a6f681b..e5d446804d32 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -643,12 +643,18 @@ do_adb_query(struct adb_request *req)
643static int adb_open(struct inode *inode, struct file *file) 643static int adb_open(struct inode *inode, struct file *file)
644{ 644{
645 struct adbdev_state *state; 645 struct adbdev_state *state;
646 int ret = 0;
646 647
647 if (iminor(inode) > 0 || adb_controller == NULL) 648 lock_kernel();
648 return -ENXIO; 649 if (iminor(inode) > 0 || adb_controller == NULL) {
650 ret = -ENXIO;
651 goto out;
652 }
649 state = kmalloc(sizeof(struct adbdev_state), GFP_KERNEL); 653 state = kmalloc(sizeof(struct adbdev_state), GFP_KERNEL);
650 if (state == 0) 654 if (state == 0) {
651 return -ENOMEM; 655 ret = -ENOMEM;
656 goto out;
657 }
652 file->private_data = state; 658 file->private_data = state;
653 spin_lock_init(&state->lock); 659 spin_lock_init(&state->lock);
654 atomic_set(&state->n_pending, 0); 660 atomic_set(&state->n_pending, 0);
@@ -656,7 +662,9 @@ static int adb_open(struct inode *inode, struct file *file)
656 init_waitqueue_head(&state->wait_queue); 662 init_waitqueue_head(&state->wait_queue);
657 state->inuse = 1; 663 state->inuse = 1;
658 664
659 return 0; 665out:
666 unlock_kernel();
667 return ret;
660} 668}
661 669
662static int adb_release(struct inode *inode, struct file *file) 670static int adb_release(struct inode *inode, struct file *file)
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 73c50bc02095..6a8221893256 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -3,6 +3,7 @@
3 */ 3 */
4 4
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/smp_lock.h>
6#include <linux/errno.h> 7#include <linux/errno.h>
7#include <linux/kernel.h> 8#include <linux/kernel.h>
8#include <linux/miscdevice.h> 9#include <linux/miscdevice.h>
@@ -119,6 +120,7 @@ anslcd_ioctl( struct inode * inode, struct file * file,
119static int 120static int
120anslcd_open( struct inode * inode, struct file * file ) 121anslcd_open( struct inode * inode, struct file * file )
121{ 122{
123 cycle_kernel_lock();
122 return 0; 124 return 0;
123} 125}
124 126
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 76dbf25cb70f..96faa799b82a 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -19,6 +19,7 @@
19 * the userland interface 19 * the userland interface
20 */ 20 */
21 21
22#include <linux/smp_lock.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/device.h> 25#include <linux/device.h>
@@ -1090,10 +1091,12 @@ static int smu_open(struct inode *inode, struct file *file)
1090 pp->mode = smu_file_commands; 1091 pp->mode = smu_file_commands;
1091 init_waitqueue_head(&pp->wait); 1092 init_waitqueue_head(&pp->wait);
1092 1093
1094 lock_kernel();
1093 spin_lock_irqsave(&smu_clist_lock, flags); 1095 spin_lock_irqsave(&smu_clist_lock, flags);
1094 list_add(&pp->list, &smu_clist); 1096 list_add(&pp->list, &smu_clist);
1095 spin_unlock_irqrestore(&smu_clist_lock, flags); 1097 spin_unlock_irqrestore(&smu_clist_lock, flags);
1096 file->private_data = pp; 1098 file->private_data = pp;
1099 unlock_kernel();
1097 1100
1098 return 0; 1101 return 0;
1099} 1102}
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index d6365a9f0637..d524dc245a2c 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -18,6 +18,7 @@
18 * 18 *
19 */ 19 */
20#include <stdarg.h> 20#include <stdarg.h>
21#include <linux/smp_lock.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/errno.h> 23#include <linux/errno.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
@@ -2047,6 +2048,7 @@ pmu_open(struct inode *inode, struct file *file)
2047 pp->rb_get = pp->rb_put = 0; 2048 pp->rb_get = pp->rb_put = 0;
2048 spin_lock_init(&pp->lock); 2049 spin_lock_init(&pp->lock);
2049 init_waitqueue_head(&pp->wait); 2050 init_waitqueue_head(&pp->wait);
2051 lock_kernel();
2050 spin_lock_irqsave(&all_pvt_lock, flags); 2052 spin_lock_irqsave(&all_pvt_lock, flags);
2051#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) 2053#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
2052 pp->backlight_locker = 0; 2054 pp->backlight_locker = 0;
@@ -2054,6 +2056,7 @@ pmu_open(struct inode *inode, struct file *file)
2054 list_add(&pp->list, &all_pmu_pvt); 2056 list_add(&pp->list, &all_pmu_pvt);
2055 spin_unlock_irqrestore(&all_pvt_lock, flags); 2057 spin_unlock_irqrestore(&all_pvt_lock, flags);
2056 file->private_data = pp; 2058 file->private_data = pp;
2059 unlock_kernel();
2057 return 0; 2060 return 0;
2058} 2061}
2059 2062
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 10748240cb2f..6a866d7c8ae5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -50,17 +50,19 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
50/** 50/**
51 * linear_mergeable_bvec -- tell bio layer if two requests can be merged 51 * linear_mergeable_bvec -- tell bio layer if two requests can be merged
52 * @q: request queue 52 * @q: request queue
53 * @bio: the buffer head that's been built up so far 53 * @bvm: properties of new bio
54 * @biovec: the request that could be merged to it. 54 * @biovec: the request that could be merged to it.
55 * 55 *
56 * Return amount of bytes we can take at this offset 56 * Return amount of bytes we can take at this offset
57 */ 57 */
58static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 58static int linear_mergeable_bvec(struct request_queue *q,
59 struct bvec_merge_data *bvm,
60 struct bio_vec *biovec)
59{ 61{
60 mddev_t *mddev = q->queuedata; 62 mddev_t *mddev = q->queuedata;
61 dev_info_t *dev0; 63 dev_info_t *dev0;
62 unsigned long maxsectors, bio_sectors = bio->bi_size >> 9; 64 unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
63 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 65 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
64 66
65 dev0 = which_dev(mddev, sector); 67 dev0 = which_dev(mddev, sector);
66 maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1)); 68 maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1));
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 914c04ddec7c..bcbb82594a19 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -241,18 +241,20 @@ static int create_strip_zones (mddev_t *mddev)
241/** 241/**
242 * raid0_mergeable_bvec -- tell bio layer if a two requests can be merged 242 * raid0_mergeable_bvec -- tell bio layer if a two requests can be merged
243 * @q: request queue 243 * @q: request queue
244 * @bio: the buffer head that's been built up so far 244 * @bvm: properties of new bio
245 * @biovec: the request that could be merged to it. 245 * @biovec: the request that could be merged to it.
246 * 246 *
247 * Return amount of bytes we can accept at this offset 247 * Return amount of bytes we can accept at this offset
248 */ 248 */
249static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 249static int raid0_mergeable_bvec(struct request_queue *q,
250 struct bvec_merge_data *bvm,
251 struct bio_vec *biovec)
250{ 252{
251 mddev_t *mddev = q->queuedata; 253 mddev_t *mddev = q->queuedata;
252 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 254 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
253 int max; 255 int max;
254 unsigned int chunk_sectors = mddev->chunk_size >> 9; 256 unsigned int chunk_sectors = mddev->chunk_size >> 9;
255 unsigned int bio_sectors = bio->bi_size >> 9; 257 unsigned int bio_sectors = bvm->bi_size >> 9;
256 258
257 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 259 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
258 if (max < 0) max = 0; /* bio_add cannot handle a negative return */ 260 if (max < 0) max = 0; /* bio_add cannot handle a negative return */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a71277b640ab..22bb2b1b886d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -439,26 +439,27 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
439/** 439/**
440 * raid10_mergeable_bvec -- tell bio layer if a two requests can be merged 440 * raid10_mergeable_bvec -- tell bio layer if a two requests can be merged
441 * @q: request queue 441 * @q: request queue
442 * @bio: the buffer head that's been built up so far 442 * @bvm: properties of new bio
443 * @biovec: the request that could be merged to it. 443 * @biovec: the request that could be merged to it.
444 * 444 *
445 * Return amount of bytes we can accept at this offset 445 * Return amount of bytes we can accept at this offset
446 * If near_copies == raid_disk, there are no striping issues, 446 * If near_copies == raid_disk, there are no striping issues,
447 * but in that case, the function isn't called at all. 447 * but in that case, the function isn't called at all.
448 */ 448 */
449static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio, 449static int raid10_mergeable_bvec(struct request_queue *q,
450 struct bio_vec *bio_vec) 450 struct bvec_merge_data *bvm,
451 struct bio_vec *biovec)
451{ 452{
452 mddev_t *mddev = q->queuedata; 453 mddev_t *mddev = q->queuedata;
453 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 454 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
454 int max; 455 int max;
455 unsigned int chunk_sectors = mddev->chunk_size >> 9; 456 unsigned int chunk_sectors = mddev->chunk_size >> 9;
456 unsigned int bio_sectors = bio->bi_size >> 9; 457 unsigned int bio_sectors = bvm->bi_size >> 9;
457 458
458 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 459 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
459 if (max < 0) max = 0; /* bio_add cannot handle a negative return */ 460 if (max < 0) max = 0; /* bio_add cannot handle a negative return */
460 if (max <= bio_vec->bv_len && bio_sectors == 0) 461 if (max <= biovec->bv_len && bio_sectors == 0)
461 return bio_vec->bv_len; 462 return biovec->bv_len;
462 else 463 else
463 return max; 464 return max;
464} 465}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3b27df52456b..9ce7154845c6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3314,15 +3314,17 @@ static int raid5_congested(void *data, int bits)
3314/* We want read requests to align with chunks where possible, 3314/* We want read requests to align with chunks where possible,
3315 * but write requests don't need to. 3315 * but write requests don't need to.
3316 */ 3316 */
3317static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 3317static int raid5_mergeable_bvec(struct request_queue *q,
3318 struct bvec_merge_data *bvm,
3319 struct bio_vec *biovec)
3318{ 3320{
3319 mddev_t *mddev = q->queuedata; 3321 mddev_t *mddev = q->queuedata;
3320 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3322 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3321 int max; 3323 int max;
3322 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3324 unsigned int chunk_sectors = mddev->chunk_size >> 9;
3323 unsigned int bio_sectors = bio->bi_size >> 9; 3325 unsigned int bio_sectors = bvm->bi_size >> 9;
3324 3326
3325 if (bio_data_dir(bio) == WRITE) 3327 if ((bvm->bi_rw & 1) == WRITE)
3326 return biovec->bv_len; /* always allow writes to be mergeable */ 3328 return biovec->bv_len; /* always allow writes to be mergeable */
3327 3329
3328 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3330 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
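All four md hunks above make the same mechanical change: a queue's ->merge_bvec_fn now receives a struct bvec_merge_data describing the bio being built instead of the bio itself, so sector, size and block device are read from bvm. A stand-alone sketch of a callback in the new form — illustration only; the chunk size and the commented-out registration call are hypothetical, not taken from this patch:

#include <linux/blkdev.h>
#include <linux/genhd.h>

static const unsigned int my_chunk_sectors = 64;	/* hypothetical 32 KiB chunks */

static int my_mergeable_bvec(struct request_queue *q,
			     struct bvec_merge_data *bvm,
			     struct bio_vec *biovec)
{
	/* All bio properties now come from bvm, not from a struct bio. */
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	unsigned int bio_sectors = bvm->bi_size >> 9;
	int max;

	max = (my_chunk_sectors -
	       ((sector & (my_chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0;		/* bio_add_page() cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;	/* always accept the first vec */
	return max;
}

/* Registered the same way as before:
 *	blk_queue_merge_bvec(q, my_mergeable_bvec);
 */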
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 8b56d929f7fd..e208a60c048a 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -32,6 +32,7 @@
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/cdev.h> 33#include <linux/cdev.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/smp_lock.h>
35#include "dvbdev.h" 36#include "dvbdev.h"
36 37
37static int dvbdev_debug; 38static int dvbdev_debug;
@@ -74,6 +75,7 @@ static int dvb_device_open(struct inode *inode, struct file *file)
74{ 75{
75 struct dvb_device *dvbdev; 76 struct dvb_device *dvbdev;
76 77
78 lock_kernel();
77 dvbdev = dvbdev_find_device (iminor(inode)); 79 dvbdev = dvbdev_find_device (iminor(inode));
78 80
79 if (dvbdev && dvbdev->fops) { 81 if (dvbdev && dvbdev->fops) {
@@ -90,8 +92,10 @@ static int dvb_device_open(struct inode *inode, struct file *file)
90 file->f_op = fops_get(old_fops); 92 file->f_op = fops_get(old_fops);
91 } 93 }
92 fops_put(old_fops); 94 fops_put(old_fops);
95 unlock_kernel();
93 return err; 96 return err;
94 } 97 }
98 unlock_kernel();
95 return -ENODEV; 99 return -ENODEV;
96} 100}
97 101
diff --git a/drivers/media/radio/miropcm20-rds.c b/drivers/media/radio/miropcm20-rds.c
index 06dfed9ef4c7..3e840f74d45c 100644
--- a/drivers/media/radio/miropcm20-rds.c
+++ b/drivers/media/radio/miropcm20-rds.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/smp_lock.h>
15#include <linux/fs.h> 16#include <linux/fs.h>
16#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
@@ -27,13 +28,16 @@ static int rds_f_open(struct inode *in, struct file *fi)
27 if (rds_users) 28 if (rds_users)
28 return -EBUSY; 29 return -EBUSY;
29 30
31 lock_kernel();
30 rds_users++; 32 rds_users++;
31 if ((text_buffer=kmalloc(66, GFP_KERNEL)) == 0) { 33 if ((text_buffer=kmalloc(66, GFP_KERNEL)) == 0) {
32 rds_users--; 34 rds_users--;
33 printk(KERN_NOTICE "aci-rds: Out of memory by open()...\n"); 35 printk(KERN_NOTICE "aci-rds: Out of memory by open()...\n");
36 unlock_kernel();
34 return -ENOMEM; 37 return -ENOMEM;
35 } 38 }
36 39
40 unlock_kernel();
37 return 0; 41 return 0;
38} 42}
39 43
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 67a661cf5219..7649860a388d 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -36,6 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/kmod.h> 37#include <linux/kmod.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/smp_lock.h>
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40#include <asm/system.h> 41#include <asm/system.h>
41 42
@@ -442,6 +443,7 @@ static int video_open(struct inode *inode, struct file *file)
442 443
443 if(minor>=VIDEO_NUM_DEVICES) 444 if(minor>=VIDEO_NUM_DEVICES)
444 return -ENODEV; 445 return -ENODEV;
446 lock_kernel();
445 mutex_lock(&videodev_lock); 447 mutex_lock(&videodev_lock);
446 vfl=video_device[minor]; 448 vfl=video_device[minor];
447 if(vfl==NULL) { 449 if(vfl==NULL) {
@@ -451,6 +453,7 @@ static int video_open(struct inode *inode, struct file *file)
451 vfl=video_device[minor]; 453 vfl=video_device[minor];
452 if (vfl==NULL) { 454 if (vfl==NULL) {
453 mutex_unlock(&videodev_lock); 455 mutex_unlock(&videodev_lock);
456 unlock_kernel();
454 return -ENODEV; 457 return -ENODEV;
455 } 458 }
456 } 459 }
@@ -464,6 +467,7 @@ static int video_open(struct inode *inode, struct file *file)
464 } 467 }
465 fops_put(old_fops); 468 fops_put(old_fops);
466 mutex_unlock(&videodev_lock); 469 mutex_unlock(&videodev_lock);
470 unlock_kernel();
467 return err; 471 return err;
468} 472}
469 473
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index e630b50966ec..c5946560c4e2 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -548,11 +548,15 @@ static int
548mptctl_fasync(int fd, struct file *filep, int mode) 548mptctl_fasync(int fd, struct file *filep, int mode)
549{ 549{
550 MPT_ADAPTER *ioc; 550 MPT_ADAPTER *ioc;
551 int ret;
551 552
553 lock_kernel();
552 list_for_each_entry(ioc, &ioc_list, list) 554 list_for_each_entry(ioc, &ioc_list, list)
553 ioc->aen_event_read_flag=0; 555 ioc->aen_event_read_flag=0;
554 556
555 return fasync_helper(fd, filep, mode, &async_queue); 557 ret = fasync_helper(fd, filep, mode, &async_queue);
558 unlock_kernel();
559 return ret;
556} 560}
557 561
558static int 562static int
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index c0fb77dc19bb..4238de98d4a6 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -1061,6 +1061,7 @@ static int cfg_open(struct inode *inode, struct file *file)
1061 if (!tmp) 1061 if (!tmp)
1062 return -ENOMEM; 1062 return -ENOMEM;
1063 1063
1064 lock_kernel();
1064 file->private_data = (void *)(i2o_cfg_info_id++); 1065 file->private_data = (void *)(i2o_cfg_info_id++);
1065 tmp->fp = file; 1066 tmp->fp = file;
1066 tmp->fasync = NULL; 1067 tmp->fasync = NULL;
@@ -1074,6 +1075,7 @@ static int cfg_open(struct inode *inode, struct file *file)
1074 spin_lock_irqsave(&i2o_config_lock, flags); 1075 spin_lock_irqsave(&i2o_config_lock, flags);
1075 open_files = tmp; 1076 open_files = tmp;
1076 spin_unlock_irqrestore(&i2o_config_lock, flags); 1077 spin_unlock_irqrestore(&i2o_config_lock, flags);
1078 unlock_kernel();
1077 1079
1078 return 0; 1080 return 0;
1079} 1081}
@@ -1082,15 +1084,17 @@ static int cfg_fasync(int fd, struct file *fp, int on)
1082{ 1084{
1083 ulong id = (ulong) fp->private_data; 1085 ulong id = (ulong) fp->private_data;
1084 struct i2o_cfg_info *p; 1086 struct i2o_cfg_info *p;
1087 int ret = -EBADF;
1085 1088
1089 lock_kernel();
1086 for (p = open_files; p; p = p->next) 1090 for (p = open_files; p; p = p->next)
1087 if (p->q_id == id) 1091 if (p->q_id == id)
1088 break; 1092 break;
1089 1093
1090 if (!p) 1094 if (p)
1091 return -EBADF; 1095 ret = fasync_helper(fd, fp, on, &p->fasync);
1092 1096 unlock_kernel();
1093 return fasync_helper(fd, fp, on, &p->fasync); 1097 return ret;
1094} 1098}
1095 1099
1096static int cfg_release(struct inode *inode, struct file *file) 1100static int cfg_release(struct inode *inode, struct file *file)
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
index 0d5ce03cdff2..5b5a14dab3d3 100644
--- a/drivers/misc/atmel_pwm.c
+++ b/drivers/misc/atmel_pwm.c
@@ -332,7 +332,7 @@ static int __init pwm_probe(struct platform_device *pdev)
332 p->base = ioremap(r->start, r->end - r->start + 1); 332 p->base = ioremap(r->start, r->end - r->start + 1);
333 if (!p->base) 333 if (!p->base)
334 goto fail; 334 goto fail;
335 p->clk = clk_get(&pdev->dev, "mck"); 335 p->clk = clk_get(&pdev->dev, "pwm_clk");
336 if (IS_ERR(p->clk)) { 336 if (IS_ERR(p->clk)) {
337 status = PTR_ERR(p->clk); 337 status = PTR_ERR(p->clk);
338 p->clk = NULL; 338 p->clk = NULL;
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
index ff51ab67231c..176fe4e09d3f 100644
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ b/drivers/misc/hdpuftrs/hdpu_cpustate.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/smp_lock.h>
20#include <linux/miscdevice.h> 21#include <linux/miscdevice.h>
21#include <linux/proc_fs.h> 22#include <linux/proc_fs.h>
22#include <linux/hdpu_features.h> 23#include <linux/hdpu_features.h>
@@ -151,7 +152,13 @@ static ssize_t cpustate_write(struct file *file, const char *buf,
151 152
152static int cpustate_open(struct inode *inode, struct file *file) 153static int cpustate_open(struct inode *inode, struct file *file)
153{ 154{
154 return cpustate_get_ref((file->f_flags & O_EXCL)); 155 int ret;
156
157 lock_kernel();
158 ret = cpustate_get_ref((file->f_flags & O_EXCL));
159 unlock_kernel();
160
161 return ret;
155} 162}
156 163
157static int cpustate_release(struct inode *inode, struct file *file) 164static int cpustate_release(struct inode *inode, struct file *file)
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 71d1c84e2fa8..186162470090 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/cdev.h> 23#include <linux/cdev.h>
24#include <linux/phantom.h> 24#include <linux/phantom.h>
25#include <linux/smp_lock.h>
25 26
26#include <asm/atomic.h> 27#include <asm/atomic.h>
27#include <asm/io.h> 28#include <asm/io.h>
@@ -212,13 +213,17 @@ static int phantom_open(struct inode *inode, struct file *file)
212 struct phantom_device *dev = container_of(inode->i_cdev, 213 struct phantom_device *dev = container_of(inode->i_cdev,
213 struct phantom_device, cdev); 214 struct phantom_device, cdev);
214 215
216 lock_kernel();
215 nonseekable_open(inode, file); 217 nonseekable_open(inode, file);
216 218
217 if (mutex_lock_interruptible(&dev->open_lock)) 219 if (mutex_lock_interruptible(&dev->open_lock)) {
220 unlock_kernel();
218 return -ERESTARTSYS; 221 return -ERESTARTSYS;
222 }
219 223
220 if (dev->opened) { 224 if (dev->opened) {
221 mutex_unlock(&dev->open_lock); 225 mutex_unlock(&dev->open_lock);
226 unlock_kernel();
222 return -EINVAL; 227 return -EINVAL;
223 } 228 }
224 229
@@ -229,7 +234,7 @@ static int phantom_open(struct inode *inode, struct file *file)
229 atomic_set(&dev->counter, 0); 234 atomic_set(&dev->counter, 0);
230 dev->opened++; 235 dev->opened++;
231 mutex_unlock(&dev->open_lock); 236 mutex_unlock(&dev->open_lock);
232 237 unlock_kernel();
233 return 0; 238 return 0;
234} 239}
235 240
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index 00e48e2a9c11..60775be22822 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -46,6 +46,7 @@
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/moduleparam.h> 47#include <linux/moduleparam.h>
48#include <linux/init.h> 48#include <linux/init.h>
49#include <linux/smp_lock.h>
49#include <linux/types.h> 50#include <linux/types.h>
50#include <linux/backlight.h> 51#include <linux/backlight.h>
51#include <linux/platform_device.h> 52#include <linux/platform_device.h>
@@ -1927,8 +1928,10 @@ static int sonypi_misc_release(struct inode *inode, struct file *file)
1927static int sonypi_misc_open(struct inode *inode, struct file *file) 1928static int sonypi_misc_open(struct inode *inode, struct file *file)
1928{ 1929{
1929 /* Flush input queue on first open */ 1930 /* Flush input queue on first open */
1931 lock_kernel();
1930 if (atomic_inc_return(&sonypi_compat.open_count) == 1) 1932 if (atomic_inc_return(&sonypi_compat.open_count) == 1)
1931 kfifo_reset(sonypi_compat.fifo); 1933 kfifo_reset(sonypi_compat.fifo);
1934 unlock_kernel();
1932 return 0; 1935 return 0;
1933} 1936}
1934 1937
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 95f33e87a99c..eed211b2ac70 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -42,6 +42,7 @@
42#include <linux/mmc/host.h> 42#include <linux/mmc/host.h>
43#include <linux/mmc/card.h> 43#include <linux/mmc/card.h>
44#include <linux/delay.h> 44#include <linux/delay.h>
45#include <linux/clk.h>
45 46
46#include <asm/dma.h> 47#include <asm/dma.h>
47#include <asm/io.h> 48#include <asm/io.h>
@@ -92,6 +93,8 @@ struct imxmci_host {
92 unsigned char actual_bus_width; 93 unsigned char actual_bus_width;
93 94
94 int prev_cmd_code; 95 int prev_cmd_code;
96
97 struct clk *clk;
95}; 98};
96 99
97#define IMXMCI_PEND_IRQ_b 0 100#define IMXMCI_PEND_IRQ_b 0
@@ -841,7 +844,7 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
841 /* The prescaler is 5 for PERCLK2 equal to 96MHz 844 /* The prescaler is 5 for PERCLK2 equal to 96MHz
842 * then 96MHz / 5 = 19.2 MHz 845 * then 96MHz / 5 = 19.2 MHz
843 */ 846 */
844 clk=imx_get_perclk2(); 847 clk = clk_get_rate(host->clk);
845 prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE; 848 prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE;
846 switch(prescaler) { 849 switch(prescaler) {
847 case 0: 850 case 0:
@@ -994,6 +997,13 @@ static int imxmci_probe(struct platform_device *pdev)
994 host->res = r; 997 host->res = r;
995 host->irq = irq; 998 host->irq = irq;
996 999
1000 host->clk = clk_get(&pdev->dev, "perclk2");
1001 if (IS_ERR(host->clk)) {
1002 ret = PTR_ERR(host->clk);
1003 goto out;
1004 }
1005 clk_enable(host->clk);
1006
997 imx_gpio_mode(PB8_PF_SD_DAT0); 1007 imx_gpio_mode(PB8_PF_SD_DAT0);
998 imx_gpio_mode(PB9_PF_SD_DAT1); 1008 imx_gpio_mode(PB9_PF_SD_DAT1);
999 imx_gpio_mode(PB10_PF_SD_DAT2); 1009 imx_gpio_mode(PB10_PF_SD_DAT2);
@@ -1017,8 +1027,8 @@ static int imxmci_probe(struct platform_device *pdev)
1017 host->imask = IMXMCI_INT_MASK_DEFAULT; 1027 host->imask = IMXMCI_INT_MASK_DEFAULT;
1018 MMC_INT_MASK = host->imask; 1028 MMC_INT_MASK = host->imask;
1019 1029
1020 1030 host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
1021 if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){ 1031 if(host->dma < 0) {
1022 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); 1032 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
1023 ret = -EBUSY; 1033 ret = -EBUSY;
1024 goto out; 1034 goto out;
@@ -1053,6 +1063,10 @@ out:
1053 imx_dma_free(host->dma); 1063 imx_dma_free(host->dma);
1054 host->dma_allocated=0; 1064 host->dma_allocated=0;
1055 } 1065 }
1066 if (host->clk) {
1067 clk_disable(host->clk);
1068 clk_put(host->clk);
1069 }
1056 } 1070 }
1057 if (mmc) 1071 if (mmc)
1058 mmc_free_host(mmc); 1072 mmc_free_host(mmc);
@@ -1082,6 +1096,9 @@ static int imxmci_remove(struct platform_device *pdev)
1082 1096
1083 tasklet_kill(&host->tasklet); 1097 tasklet_kill(&host->tasklet);
1084 1098
1099 clk_disable(host->clk);
1100 clk_put(host->clk);
1101
1085 release_resource(host->res); 1102 release_resource(host->res);
1086 1103
1087 mmc_free_host(mmc); 1104 mmc_free_host(mmc);
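The imxmmc hunks above drop the SoC-private imx_get_perclk2() helper in favour of the generic clk API: the clock is looked up once at probe time, enabled, queried for its rate, and released again on the error and remove paths. A minimal sketch of that lifecycle — illustration only; foo_probe/foo_remove are hypothetical, while the "perclk2" id follows the diff:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct clk *foo_clk;		/* held for the device's lifetime */

static int foo_probe(struct platform_device *pdev)
{
	foo_clk = clk_get(&pdev->dev, "perclk2");
	if (IS_ERR(foo_clk))
		return PTR_ERR(foo_clk);
	clk_enable(foo_clk);

	/* the generic rate query replaces imx_get_perclk2() */
	pr_debug("bus clock runs at %lu Hz\n", clk_get_rate(foo_clk));
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	clk_disable(foo_clk);
	clk_put(foo_clk);
	return 0;
}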
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 4a79b187b568..5c29872184e6 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -130,10 +130,6 @@ typedef struct partition_t {
130 u_int16_t DataUnits; 130 u_int16_t DataUnits;
131 u_int32_t BlocksPerUnit; 131 u_int32_t BlocksPerUnit;
132 erase_unit_header_t header; 132 erase_unit_header_t header;
133#if 0
134 region_info_t region;
135 memory_handle_t handle;
136#endif
137} partition_t; 133} partition_t;
138 134
139/* Partition state flags */ 135/* Partition state flags */
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index c12d8056bebd..68eec6c6c517 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -60,13 +60,22 @@ struct omapflash_info {
60static void omap_set_vpp(struct map_info *map, int enable) 60static void omap_set_vpp(struct map_info *map, int enable)
61{ 61{
62 static int count; 62 static int count;
63 63 u32 l;
64 if (enable) { 64
65 if (count++ == 0) 65 if (cpu_class_is_omap1()) {
66 OMAP_EMIFS_CONFIG_REG |= OMAP_EMIFS_CONFIG_WP; 66 if (enable) {
67 } else { 67 if (count++ == 0) {
68 if (count && (--count == 0)) 68 l = omap_readl(EMIFS_CONFIG);
69 OMAP_EMIFS_CONFIG_REG &= ~OMAP_EMIFS_CONFIG_WP; 69 l |= OMAP_EMIFS_CONFIG_WP;
70 omap_writel(l, EMIFS_CONFIG);
71 }
72 } else {
73 if (count && (--count == 0)) {
74 l = omap_readl(EMIFS_CONFIG);
75 l &= ~OMAP_EMIFS_CONFIG_WP;
76 omap_writel(l, EMIFS_CONFIG);
77 }
78 }
70 } 79 }
71} 80}
72 81
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 1912d968718b..0cc31675aeb9 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -498,17 +498,14 @@ static int pcmciamtd_config(struct pcmcia_device *link)
498 int i; 498 int i;
499 config_info_t t; 499 config_info_t t;
500 static char *probes[] = { "jedec_probe", "cfi_probe" }; 500 static char *probes[] = { "jedec_probe", "cfi_probe" };
501 cisinfo_t cisinfo;
502 int new_name = 0; 501 int new_name = 0;
503 502
504 DEBUG(3, "link=0x%p", link); 503 DEBUG(3, "link=0x%p", link);
505 504
506 DEBUG(2, "Validating CIS"); 505 DEBUG(2, "Validating CIS");
507 ret = pcmcia_validate_cis(link, &cisinfo); 506 ret = pcmcia_validate_cis(link, NULL);
508 if(ret != CS_SUCCESS) { 507 if(ret != CS_SUCCESS) {
509 cs_error(link, GetTupleData, ret); 508 cs_error(link, GetTupleData, ret);
510 } else {
511 DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains);
512 } 509 }
513 510
514 card_settings(dev, link, &new_name); 511 card_settings(dev, link, &new_name);
@@ -563,9 +560,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
563 DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10); 560 DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
564 561
565 /* Get write protect status */ 562 /* Get write protect status */
566 CS_CHECK(GetStatus, pcmcia_get_status(link, &status)); 563 DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win);
567 DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx",
568 status.CardState, (unsigned long)link->win);
569 dev->win_base = ioremap(req.Base, req.Size); 564 dev->win_base = ioremap(req.Base, req.Size);
570 if(!dev->win_base) { 565 if(!dev->win_base) {
571 err("ioremap(%lu, %u) failed", req.Base, req.Size); 566 err("ioremap(%lu, %u) failed", req.Base, req.Size);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 5d3ac512ce16..129d429cd2da 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/smp_lock.h>
17 18
18#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
19#include <linux/mtd/compatmac.h> 20#include <linux/mtd/compatmac.h>
@@ -86,6 +87,7 @@ static int mtd_open(struct inode *inode, struct file *file)
86{ 87{
87 int minor = iminor(inode); 88 int minor = iminor(inode);
88 int devnum = minor >> 1; 89 int devnum = minor >> 1;
90 int ret = 0;
89 struct mtd_info *mtd; 91 struct mtd_info *mtd;
90 struct mtd_file_info *mfi; 92 struct mtd_file_info *mfi;
91 93
@@ -98,31 +100,39 @@ static int mtd_open(struct inode *inode, struct file *file)
98 if ((file->f_mode & 2) && (minor & 1)) 100 if ((file->f_mode & 2) && (minor & 1))
99 return -EACCES; 101 return -EACCES;
100 102
103 lock_kernel();
101 mtd = get_mtd_device(NULL, devnum); 104 mtd = get_mtd_device(NULL, devnum);
102 105
103 if (IS_ERR(mtd)) 106 if (IS_ERR(mtd)) {
104 return PTR_ERR(mtd); 107 ret = PTR_ERR(mtd);
108 goto out;
109 }
105 110
106 if (MTD_ABSENT == mtd->type) { 111 if (MTD_ABSENT == mtd->type) {
107 put_mtd_device(mtd); 112 put_mtd_device(mtd);
108 return -ENODEV; 113 ret = -ENODEV;
114 goto out;
109 } 115 }
110 116
111 /* You can't open it RW if it's not a writeable device */ 117 /* You can't open it RW if it's not a writeable device */
112 if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) { 118 if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
113 put_mtd_device(mtd); 119 put_mtd_device(mtd);
114 return -EACCES; 120 ret = -EACCES;
121 goto out;
115 } 122 }
116 123
117 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); 124 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
118 if (!mfi) { 125 if (!mfi) {
119 put_mtd_device(mtd); 126 put_mtd_device(mtd);
120 return -ENOMEM; 127 ret = -ENOMEM;
128 goto out;
121 } 129 }
122 mfi->mtd = mtd; 130 mfi->mtd = mtd;
123 file->private_data = mfi; 131 file->private_data = mfi;
124 132
125 return 0; 133out:
134 unlock_kernel();
135 return ret;
126} /* mtd_open */ 136} /* mtd_open */
127 137
128/*====================================================================*/ 138/*====================================================================*/
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 59e05a1c50cf..ee2ac3948cd8 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -85,6 +85,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
85 nc->cmd_ctrl = orion_nand_cmd_ctrl; 85 nc->cmd_ctrl = orion_nand_cmd_ctrl;
86 nc->ecc.mode = NAND_ECC_SOFT; 86 nc->ecc.mode = NAND_ECC_SOFT;
87 87
88 if (board->chip_delay)
89 nc->chip_delay = board->chip_delay;
90
88 if (board->width == 16) 91 if (board->width == 16)
89 nc->options |= NAND_BUSWIDTH_16; 92 nc->options |= NAND_BUSWIDTH_16;
90 93
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 9d6aae5449b6..89193ba9451e 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -39,6 +39,7 @@
39#include <linux/stat.h> 39#include <linux/stat.h>
40#include <linux/ioctl.h> 40#include <linux/ioctl.h>
41#include <linux/capability.h> 41#include <linux/capability.h>
42#include <linux/smp_lock.h>
42#include <mtd/ubi-user.h> 43#include <mtd/ubi-user.h>
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
44#include <asm/div64.h> 45#include <asm/div64.h>
@@ -103,9 +104,12 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
103 struct ubi_volume_desc *desc; 104 struct ubi_volume_desc *desc;
104 int vol_id = iminor(inode) - 1, mode, ubi_num; 105 int vol_id = iminor(inode) - 1, mode, ubi_num;
105 106
107 lock_kernel();
106 ubi_num = ubi_major2num(imajor(inode)); 108 ubi_num = ubi_major2num(imajor(inode));
107 if (ubi_num < 0) 109 if (ubi_num < 0) {
110 unlock_kernel();
108 return ubi_num; 111 return ubi_num;
112 }
109 113
110 if (file->f_mode & FMODE_WRITE) 114 if (file->f_mode & FMODE_WRITE)
111 mode = UBI_READWRITE; 115 mode = UBI_READWRITE;
@@ -115,6 +119,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
115 dbg_msg("open volume %d, mode %d", vol_id, mode); 119 dbg_msg("open volume %d, mode %d", vol_id, mode);
116 120
117 desc = ubi_open_volume(ubi_num, vol_id, mode); 121 desc = ubi_open_volume(ubi_num, vol_id, mode);
122 unlock_kernel();
118 if (IS_ERR(desc)) 123 if (IS_ERR(desc))
119 return PTR_ERR(desc); 124 return PTR_ERR(desc);
120 125
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8e3e968b2957..2683ee32fc11 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -217,7 +217,7 @@ config MII
217 217
218config MACB 218config MACB
219 tristate "Atmel MACB support" 219 tristate "Atmel MACB support"
220 depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91CAP9 220 depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91CAP9
221 select PHYLIB 221 select PHYLIB
222 help 222 help
223 The Atmel MACB ethernet interface is found on many AT32 and AT91 223 The Atmel MACB ethernet interface is found on many AT32 and AT91
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 00081d2b9cd5..e9d15eccad08 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -647,7 +647,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
647 struct ei_device *ei_local; 647 struct ei_device *ei_local;
648 struct net_device *dev; 648 struct net_device *dev;
649 struct etherh_priv *eh; 649 struct etherh_priv *eh;
650 int i, ret; 650 int ret;
651 DECLARE_MAC_BUF(mac); 651 DECLARE_MAC_BUF(mac);
652 652
653 etherh_banner(); 653 etherh_banner();
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index d5c2d27f3ea4..f76b0b6c277d 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -13,16 +13,8 @@
13 * 13 *
14 */ 14 */
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/netdevice.h> 16#include <linux/netdevice.h>
20#include <linux/slab.h>
21#include <linux/rtnetlink.h>
22#include <linux/interrupt.h>
23#include <linux/dma-mapping.h>
24#include <linux/platform_device.h> 17#include <linux/platform_device.h>
25#include <linux/pm.h>
26#include <linux/clk.h> 18#include <linux/clk.h>
27 19
28#include <net/irda/irda.h> 20#include <net/irda/irda.h>
@@ -30,17 +22,9 @@
30#include <net/irda/wrapper.h> 22#include <net/irda/wrapper.h>
31#include <net/irda/irda_device.h> 23#include <net/irda/irda_device.h>
32 24
33#include <asm/irq.h>
34#include <asm/dma.h> 25#include <asm/dma.h>
35#include <asm/delay.h>
36#include <asm/hardware.h>
37#include <asm/arch/irda.h> 26#include <asm/arch/irda.h>
38#include <asm/arch/pxa-regs.h> 27#include <asm/arch/pxa-regs.h>
39#include <asm/arch/pxa2xx-gpio.h>
40
41#ifdef CONFIG_MACH_MAINSTONE
42#include <asm/arch/mainstone.h>
43#endif
44 28
45#define IrSR_RXPL_NEG_IS_ZERO (1<<4) 29#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
46#define IrSR_RXPL_POS_IS_ZERO 0x0 30#define IrSR_RXPL_POS_IS_ZERO 0x0
@@ -163,10 +147,6 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
163 /* set board transceiver to SIR mode */ 147 /* set board transceiver to SIR mode */
164 si->pdata->transceiver_mode(si->dev, IR_SIRMODE); 148 si->pdata->transceiver_mode(si->dev, IR_SIRMODE);
165 149
166 /* configure GPIO46/47 */
167 pxa_gpio_mode(GPIO46_STRXD_MD);
168 pxa_gpio_mode(GPIO47_STTXD_MD);
169
170 /* enable the STUART clock */ 150 /* enable the STUART clock */
171 pxa_irda_enable_sirclk(si); 151 pxa_irda_enable_sirclk(si);
172 } 152 }
@@ -201,10 +181,6 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
201 /* set board transceiver to FIR mode */ 181 /* set board transceiver to FIR mode */
202 si->pdata->transceiver_mode(si->dev, IR_FIRMODE); 182 si->pdata->transceiver_mode(si->dev, IR_FIRMODE);
203 183
204 /* configure GPIO46/47 */
205 pxa_gpio_mode(GPIO46_ICPRXD_MD);
206 pxa_gpio_mode(GPIO47_ICPTXD_MD);
207
208 /* enable the FICP clock */ 184 /* enable the FICP clock */
209 pxa_irda_enable_firclk(si); 185 pxa_irda_enable_firclk(si);
210 186
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 92dccd43bdca..0a5745a854c7 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1277,8 +1277,45 @@ static int __exit macb_remove(struct platform_device *pdev)
1277 return 0; 1277 return 0;
1278} 1278}
1279 1279
1280#ifdef CONFIG_PM
1281static int macb_suspend(struct platform_device *pdev, pm_message_t state)
1282{
1283 struct net_device *netdev = platform_get_drvdata(pdev);
1284 struct macb *bp = netdev_priv(netdev);
1285
1286 netif_device_detach(netdev);
1287
1288#ifndef CONFIG_ARCH_AT91
1289 clk_disable(bp->hclk);
1290#endif
1291 clk_disable(bp->pclk);
1292
1293 return 0;
1294}
1295
1296static int macb_resume(struct platform_device *pdev)
1297{
1298 struct net_device *netdev = platform_get_drvdata(pdev);
1299 struct macb *bp = netdev_priv(netdev);
1300
1301 clk_enable(bp->pclk);
1302#ifndef CONFIG_ARCH_AT91
1303 clk_enable(bp->hclk);
1304#endif
1305
1306 netif_device_attach(netdev);
1307
1308 return 0;
1309}
1310#else
1311#define macb_suspend NULL
1312#define macb_resume NULL
1313#endif
1314
1280static struct platform_driver macb_driver = { 1315static struct platform_driver macb_driver = {
1281 .remove = __exit_p(macb_remove), 1316 .remove = __exit_p(macb_remove),
1317 .suspend = macb_suspend,
1318 .resume = macb_resume,
1282 .driver = { 1319 .driver = {
1283 .name = "macb", 1320 .name = "macb",
1284 .owner = THIS_MODULE, 1321 .owner = THIS_MODULE,
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 1f4ca2b54a73..83625fdff3dd 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -39,6 +39,7 @@
39#include <linux/if_arp.h> 39#include <linux/if_arp.h>
40#include <linux/ip.h> 40#include <linux/ip.h>
41#include <linux/tcp.h> 41#include <linux/tcp.h>
42#include <linux/smp_lock.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/rwsem.h> 44#include <linux/rwsem.h>
44#include <linux/stddef.h> 45#include <linux/stddef.h>
@@ -353,6 +354,7 @@ static const int npindex_to_ethertype[NUM_NP] = {
353 */ 354 */
354static int ppp_open(struct inode *inode, struct file *file) 355static int ppp_open(struct inode *inode, struct file *file)
355{ 356{
357 cycle_kernel_lock();
356 /* 358 /*
357 * This could (should?) be enforced by the permissions on /dev/ppp. 359 * This could (should?) be enforced by the permissions on /dev/ppp.
358 */ 360 */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b9018bfa0a97..eba1271b9735 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -48,6 +48,7 @@
48#include <linux/kernel.h> 48#include <linux/kernel.h>
49#include <linux/major.h> 49#include <linux/major.h>
50#include <linux/slab.h> 50#include <linux/slab.h>
51#include <linux/smp_lock.h>
51#include <linux/poll.h> 52#include <linux/poll.h>
52#include <linux/fcntl.h> 53#include <linux/fcntl.h>
53#include <linux/init.h> 54#include <linux/init.h>
@@ -802,22 +803,26 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
802 803
803 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); 804 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
804 805
806 lock_kernel();
805 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) 807 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
806 return ret; 808 goto out;
807 809
808 if (on) { 810 if (on) {
809 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0); 811 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
810 if (ret) 812 if (ret)
811 return ret; 813 goto out;
812 tun->flags |= TUN_FASYNC; 814 tun->flags |= TUN_FASYNC;
813 } else 815 } else
814 tun->flags &= ~TUN_FASYNC; 816 tun->flags &= ~TUN_FASYNC;
815 817 ret = 0;
816 return 0; 818out:
819 unlock_kernel();
820 return ret;
817} 821}
818 822
819static int tun_chr_open(struct inode *inode, struct file * file) 823static int tun_chr_open(struct inode *inode, struct file * file)
820{ 824{
825 cycle_kernel_lock();
821 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 826 DBG1(KERN_INFO "tunX: tun_chr_open\n");
822 file->private_data = NULL; 827 file->private_data = NULL;
823 return 0; 828 return 0;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index b0fce1387eaf..5827324e9d9f 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -92,6 +92,7 @@
92#include <linux/spinlock.h> 92#include <linux/spinlock.h>
93#include <linux/mutex.h> 93#include <linux/mutex.h>
94#include <linux/device.h> 94#include <linux/device.h>
95#include <linux/smp_lock.h>
95 96
96#undef COSA_SLOW_IO /* for testing purposes only */ 97#undef COSA_SLOW_IO /* for testing purposes only */
97 98
@@ -970,15 +971,21 @@ static int cosa_open(struct inode *inode, struct file *file)
970 struct channel_data *chan; 971 struct channel_data *chan;
971 unsigned long flags; 972 unsigned long flags;
972 int n; 973 int n;
974 int ret = 0;
973 975
976 lock_kernel();
974 if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS) 977 if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS)
975 >= nr_cards) 978 >= nr_cards) {
976 return -ENODEV; 979 ret = -ENODEV;
980 goto out;
981 }
977 cosa = cosa_cards+n; 982 cosa = cosa_cards+n;
978 983
979 if ((n=iminor(file->f_path.dentry->d_inode) 984 if ((n=iminor(file->f_path.dentry->d_inode)
980 & ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels) 985 & ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels) {
981 return -ENODEV; 986 ret = -ENODEV;
987 goto out;
988 }
982 chan = cosa->chan + n; 989 chan = cosa->chan + n;
983 990
984 file->private_data = chan; 991 file->private_data = chan;
@@ -987,7 +994,8 @@ static int cosa_open(struct inode *inode, struct file *file)
987 994
988 if (chan->usage < 0) { /* in netdev mode */ 995 if (chan->usage < 0) { /* in netdev mode */
989 spin_unlock_irqrestore(&cosa->lock, flags); 996 spin_unlock_irqrestore(&cosa->lock, flags);
990 return -EBUSY; 997 ret = -EBUSY;
998 goto out;
991 } 999 }
992 cosa->usage++; 1000 cosa->usage++;
993 chan->usage++; 1001 chan->usage++;
@@ -996,7 +1004,9 @@ static int cosa_open(struct inode *inode, struct file *file)
996 chan->setup_rx = chrdev_setup_rx; 1004 chan->setup_rx = chrdev_setup_rx;
997 chan->rx_done = chrdev_rx_done; 1005 chan->rx_done = chrdev_rx_done;
998 spin_unlock_irqrestore(&cosa->lock, flags); 1006 spin_unlock_irqrestore(&cosa->lock, flags);
999 return 0; 1007out:
1008 unlock_kernel();
1009 return ret;
1000} 1010}
1001 1011
1002static int cosa_release(struct inode *inode, struct file *file) 1012static int cosa_release(struct inode *inode, struct file *file)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d26f69b0184f..ef671d1a3bf0 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1324,7 +1324,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1324 goto fail; 1324 goto fail;
1325 } 1325 }
1326 1326
1327 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL); 1327 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1328 if (!txs) { 1328 if (!txs) {
1329 err = -ENOMEM; 1329 err = -ENOMEM;
1330 xenbus_dev_fatal(dev, err, "allocating tx ring page"); 1330 xenbus_dev_fatal(dev, err, "allocating tx ring page");
@@ -1340,7 +1340,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1340 } 1340 }
1341 1341
1342 info->tx_ring_ref = err; 1342 info->tx_ring_ref = err;
1343 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL); 1343 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1344 if (!rxs) { 1344 if (!rxs) {
1345 err = -ENOMEM; 1345 err = -ENOMEM;
1346 xenbus_dev_fatal(dev, err, "allocating rx ring page"); 1346 xenbus_dev_fatal(dev, err, "allocating rx ring page");
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 86e9c84a965e..5ac207932fd7 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/miscdevice.h> 25#include <linux/miscdevice.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/smp_lock.h>
27#include <linux/fs.h> 28#include <linux/fs.h>
28#include <asm/io.h> 29#include <asm/io.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
@@ -83,6 +84,8 @@ static int eisa_eeprom_ioctl(struct inode *inode, struct file *file,
83 84
84static int eisa_eeprom_open(struct inode *inode, struct file *file) 85static int eisa_eeprom_open(struct inode *inode, struct file *file)
85{ 86{
87 cycle_kernel_lock();
88
86 if (file->f_mode & 2) 89 if (file->f_mode & 2)
87 return -EINVAL; 90 return -EINVAL;
88 91
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 66c0fd21894b..bb0642318a95 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1637,12 +1637,43 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1637} 1637}
1638 1638
1639#ifdef CONFIG_DMAR_GFX_WA 1639#ifdef CONFIG_DMAR_GFX_WA
1640extern int arch_get_ram_range(int slot, u64 *addr, u64 *size); 1640struct iommu_prepare_data {
1641 struct pci_dev *pdev;
1642 int ret;
1643};
1644
1645static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1646 unsigned long end_pfn, void *datax)
1647{
1648 struct iommu_prepare_data *data;
1649
1650 data = (struct iommu_prepare_data *)datax;
1651
1652 data->ret = iommu_prepare_identity_map(data->pdev,
1653 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1654 return data->ret;
1655
1656}
1657
1658static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1659{
1660 int nid;
1661 struct iommu_prepare_data data;
1662
1663 data.pdev = pdev;
1664 data.ret = 0;
1665
1666 for_each_online_node(nid) {
1667 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1668 if (data.ret)
1669 return data.ret;
1670 }
1671 return data.ret;
1672}
1673
1641static void __init iommu_prepare_gfx_mapping(void) 1674static void __init iommu_prepare_gfx_mapping(void)
1642{ 1675{
1643 struct pci_dev *pdev = NULL; 1676 struct pci_dev *pdev = NULL;
1644 u64 base, size;
1645 int slot;
1646 int ret; 1677 int ret;
1647 1678
1648 for_each_pci_dev(pdev) { 1679 for_each_pci_dev(pdev) {
@@ -1651,17 +1682,9 @@ static void __init iommu_prepare_gfx_mapping(void)
1651 continue; 1682 continue;
1652 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n", 1683 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1653 pci_name(pdev)); 1684 pci_name(pdev));
1654 slot = arch_get_ram_range(0, &base, &size); 1685 ret = iommu_prepare_with_active_regions(pdev);
1655 while (slot >= 0) { 1686 if (ret)
1656 ret = iommu_prepare_identity_map(pdev, 1687 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1657 base, base + size);
1658 if (ret)
1659 goto error;
1660 slot = arch_get_ram_range(slot, &base, &size);
1661 }
1662 continue;
1663error:
1664 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1665 } 1688 }
1666} 1689}
1667#endif 1690#endif
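The intel-iommu rework above stops walking RAM through arch_get_ram_range() and instead lets work_with_active_regions() hand every populated pfn range of a node to a callback, ending the walk when the callback returns non-zero. A stand-alone sketch of the same helper applied to something simpler (counting present pages); it assumes CONFIG_ARCH_POPULATES_NODE_MAP and the names are hypothetical, not part of this patch:

#include <linux/mm.h>

static int __init count_range(unsigned long start_pfn,
			      unsigned long end_pfn, void *data)
{
	*(unsigned long *)data += end_pfn - start_pfn;
	return 0;		/* returning non-zero would stop the walk early */
}

static unsigned long __init node_present_pfns(int nid)
{
	unsigned long pfns = 0;

	work_with_active_regions(nid, count_range, &pfns);
	return pfns;
}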
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 1b0eb5aaf650..e45402adac3f 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -263,6 +263,13 @@ config OMAP_CF
263 Say Y here to support the CompactFlash controller on OMAP. 263 Say Y here to support the CompactFlash controller on OMAP.
264 Note that this doesn't support "True IDE" mode. 264 Note that this doesn't support "True IDE" mode.
265 265
266config BFIN_CFPCMCIA
267 tristate "Blackfin CompactFlash PCMCIA Driver"
268 depends on PCMCIA && BLACKFIN
269 help
270 Say Y here to support the CompactFlash PCMCIA driver for Blackfin.
271
272
266config AT91_CF 273config AT91_CF
267 tristate "AT91 CompactFlash Controller" 274 tristate "AT91 CompactFlash Controller"
268 depends on PCMCIA && ARCH_AT91RM9200 275 depends on PCMCIA && ARCH_AT91RM9200
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 6f6478ba7174..85c6cc931f97 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
36obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o 36obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
37obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o 37obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
38obj-$(CONFIG_OMAP_CF) += omap_cf.o 38obj-$(CONFIG_OMAP_CF) += omap_cf.o
39obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o
39obj-$(CONFIG_AT91_CF) += at91_cf.o 40obj-$(CONFIG_AT91_CF) += at91_cf.o
40obj-$(CONFIG_ELECTRA_CF) += electra_cf.o 41obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
41 42
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
index 1e467bb54077..a53ef5902518 100644
--- a/drivers/pcmcia/au1000_generic.h
+++ b/drivers/pcmcia/au1000_generic.h
@@ -26,7 +26,6 @@
26#include <pcmcia/cs_types.h> 26#include <pcmcia/cs_types.h>
27#include <pcmcia/cs.h> 27#include <pcmcia/cs.h>
28#include <pcmcia/ss.h> 28#include <pcmcia/ss.h>
29#include <pcmcia/bulkmem.h>
30#include <pcmcia/cistpl.h> 29#include <pcmcia/cistpl.h>
31#include "cs_internal.h" 30#include "cs_internal.h"
32 31
@@ -34,9 +33,9 @@
34#define AU1000_PCMCIA_IO_SPEED (255) 33#define AU1000_PCMCIA_IO_SPEED (255)
35#define AU1000_PCMCIA_MEM_SPEED (300) 34#define AU1000_PCMCIA_MEM_SPEED (300)
36 35
37#define AU1X_SOCK0_IO 0xF00000000 36#define AU1X_SOCK0_IO 0xF00000000ULL
38#define AU1X_SOCK0_PHYS_ATTR 0xF40000000 37#define AU1X_SOCK0_PHYS_ATTR 0xF40000000ULL
39#define AU1X_SOCK0_PHYS_MEM 0xF80000000 38#define AU1X_SOCK0_PHYS_MEM 0xF80000000ULL
40/* pseudo 32 bit phys addresses, which get fixed up to the 39/* pseudo 32 bit phys addresses, which get fixed up to the
41 * real 36 bit address in fixup_bigphys_addr() */ 40 * real 36 bit address in fixup_bigphys_addr() */
42#define AU1X_SOCK0_PSEUDO_PHYS_ATTR 0xF4000000 41#define AU1X_SOCK0_PSEUDO_PHYS_ATTR 0xF4000000
@@ -45,16 +44,20 @@
45/* pcmcia socket 1 needs external glue logic so the memory map 44/* pcmcia socket 1 needs external glue logic so the memory map
46 * differs from board to board. 45 * differs from board to board.
47 */ 46 */
48#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100) || defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1550) || defined(CONFIG_MIPS_PB1200) 47#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100) || \
49#define AU1X_SOCK1_IO 0xF08000000 48 defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1550) || \
50#define AU1X_SOCK1_PHYS_ATTR 0xF48000000 49 defined(CONFIG_MIPS_PB1200)
51#define AU1X_SOCK1_PHYS_MEM 0xF88000000 50#define AU1X_SOCK1_IO 0xF08000000ULL
51#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL
52#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL
52#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4800000 53#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4800000
53#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8800000 54#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8800000
54#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) || defined(CONFIG_MIPS_DB1500) || defined(CONFIG_MIPS_DB1550) || defined(CONFIG_MIPS_DB1200) 55#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) || \
55#define AU1X_SOCK1_IO 0xF04000000 56 defined(CONFIG_MIPS_DB1500) || defined(CONFIG_MIPS_DB1550) || \
56#define AU1X_SOCK1_PHYS_ATTR 0xF44000000 57 defined(CONFIG_MIPS_DB1200)
57#define AU1X_SOCK1_PHYS_MEM 0xF84000000 58#define AU1X_SOCK1_IO 0xF04000000ULL
59#define AU1X_SOCK1_PHYS_ATTR 0xF44000000ULL
60#define AU1X_SOCK1_PHYS_MEM 0xF84000000ULL
58#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4400000 61#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4400000
59#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8400000 62#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8400000
60#endif 63#endif
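
The ULL suffixes added above matter because these socket windows sit above 4 GB: without a suffix the constant's type depends on the compiler dialect and target, and assigning it to a 32-bit type silently drops the upper bits (exactly the truncation the pseudo 32-bit addresses rely on fixup_bigphys_addr() to undo). A minimal, target-independent illustration, not Alchemy-specific:

#include <stdio.h>

int main(void)
{
        /* storing a 36-bit physical address in a 32-bit unsigned long
         * truncates it; the ULL suffix keeps the constant (and any
         * arithmetic done with it) 64 bits wide */
        unsigned long      narrow = (unsigned long)0xF00000000ULL;
        unsigned long long wide   = 0xF00000000ULL;

        printf("narrow = %#lx\n", narrow);   /* truncates to 0 on a 32-bit build */
        printf("wide   = %#llx\n", wide);    /* always 0xf00000000 */
        return 0;
}
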
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index 157e41423a0a..aa1cd4d3aa29 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -35,7 +35,6 @@
35#include <pcmcia/cs_types.h> 35#include <pcmcia/cs_types.h>
36#include <pcmcia/cs.h> 36#include <pcmcia/cs.h>
37#include <pcmcia/ss.h> 37#include <pcmcia/ss.h>
38#include <pcmcia/bulkmem.h>
39#include <pcmcia/cistpl.h> 38#include <pcmcia/cistpl.h>
40#include <pcmcia/bus_ops.h> 39#include <pcmcia/bus_ops.h>
41#include "cs_internal.h" 40#include "cs_internal.h"
diff --git a/drivers/pcmcia/au1000_xxs1500.c b/drivers/pcmcia/au1000_xxs1500.c
index c78ed5347510..8a9b18cee847 100644
--- a/drivers/pcmcia/au1000_xxs1500.c
+++ b/drivers/pcmcia/au1000_xxs1500.c
@@ -39,7 +39,6 @@
39#include <pcmcia/cs_types.h> 39#include <pcmcia/cs_types.h>
40#include <pcmcia/cs.h> 40#include <pcmcia/cs.h>
41#include <pcmcia/ss.h> 41#include <pcmcia/ss.h>
42#include <pcmcia/bulkmem.h>
43#include <pcmcia/cistpl.h> 42#include <pcmcia/cistpl.h>
44#include <pcmcia/bus_ops.h> 43#include <pcmcia/bus_ops.h>
45#include "cs_internal.h" 44#include "cs_internal.h"
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
new file mode 100644
index 000000000000..bb7338863fb9
--- /dev/null
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -0,0 +1,339 @@
1/*
2 * file: drivers/pcmcia/bfin_cf.c
3 *
4 * based on: drivers/pcmcia/omap_cf.c
5 * omap_cf.c -- OMAP 16xx CompactFlash controller driver
6 *
7 * Copyright (c) 2005 David Brownell
8 * Copyright (c) 2006-2008 Michael Hennerich Analog Devices Inc.
9 *
10 * bugs: enter bugs at http://blackfin.uclinux.org/
11 *
12 * this program is free software; you can redistribute it and/or modify
13 * it under the terms of the gnu general public license as published by
14 * the free software foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * this program is distributed in the hope that it will be useful,
18 * but without any warranty; without even the implied warranty of
19 * merchantability or fitness for a particular purpose. see the
20 * gnu general public license for more details.
21 *
22 * you should have received a copy of the gnu general public license
23 * along with this program; see the file copying.
24 * if not, write to the free software foundation,
25 * 59 temple place - suite 330, boston, ma 02111-1307, usa.
26 */
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/sched.h>
31#include <linux/platform_device.h>
32#include <linux/errno.h>
33#include <linux/init.h>
34#include <linux/delay.h>
35#include <linux/interrupt.h>
36#include <linux/irq.h>
37#include <linux/io.h>
38
39#include <pcmcia/ss.h>
40#include <pcmcia/cisreg.h>
41#include <asm/gpio.h>
42
43#define SZ_1K 0x00000400
44#define SZ_8K 0x00002000
45#define SZ_2K (2 * SZ_1K)
46
47#define POLL_INTERVAL (2 * HZ)
48
49#define CF_ATASEL_ENA 0x20311802 /* Inverts RESET */
50#define CF_ATASEL_DIS 0x20311800
51
52#define bfin_cf_present(pfx) (gpio_get_value(pfx))
53
54/*--------------------------------------------------------------------------*/
55
56static const char driver_name[] = "bfin_cf_pcmcia";
57
58struct bfin_cf_socket {
59 struct pcmcia_socket socket;
60
61 struct timer_list timer;
62 unsigned present:1;
63 unsigned active:1;
64
65 struct platform_device *pdev;
66 unsigned long phys_cf_io;
67 unsigned long phys_cf_attr;
68 u_int irq;
69 u_short cd_pfx;
70};
71
72/*--------------------------------------------------------------------------*/
73static int bfin_cf_reset(void)
74{
75 outw(0, CF_ATASEL_ENA);
76 mdelay(200);
77 outw(0, CF_ATASEL_DIS);
78
79 return 0;
80}
81
82static int bfin_cf_ss_init(struct pcmcia_socket *s)
83{
84 return 0;
85}
86
87/* the timer is primarily to kick this socket's pccardd */
88static void bfin_cf_timer(unsigned long _cf)
89{
90 struct bfin_cf_socket *cf = (void *)_cf;
91 unsigned short present = bfin_cf_present(cf->cd_pfx);
92
93 if (present != cf->present) {
94 cf->present = present;
95 dev_dbg(&cf->pdev->dev, ": card %s\n",
96 present ? "present" : "gone");
97 pcmcia_parse_events(&cf->socket, SS_DETECT);
98 }
99
100 if (cf->active)
101 mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
102}
103
104static int bfin_cf_get_status(struct pcmcia_socket *s, u_int *sp)
105{
106 struct bfin_cf_socket *cf;
107
108 if (!sp)
109 return -EINVAL;
110
111 cf = container_of(s, struct bfin_cf_socket, socket);
112
113 if (bfin_cf_present(cf->cd_pfx)) {
114 *sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
115 s->irq.AssignedIRQ = 0;
116 s->pci_irq = cf->irq;
117
118 } else
119 *sp = 0;
120 return 0;
121}
122
123static int
124bfin_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
125{
126
127 struct bfin_cf_socket *cf;
128 cf = container_of(sock, struct bfin_cf_socket, socket);
129
130 switch (s->Vcc) {
131 case 0:
132 case 33:
133 break;
134 case 50:
135 break;
136 default:
137 return -EINVAL;
138 }
139
140 if (s->flags & SS_RESET) {
141 disable_irq(cf->irq);
142 bfin_cf_reset();
143 enable_irq(cf->irq);
144 }
145
146 dev_dbg(&cf->pdev->dev, ": Vcc %d, io_irq %d, flags %04x csc %04x\n",
147 s->Vcc, s->io_irq, s->flags, s->csc_mask);
148
149 return 0;
150}
151
152static int bfin_cf_ss_suspend(struct pcmcia_socket *s)
153{
154 return bfin_cf_set_socket(s, &dead_socket);
155}
156
157/* regions are 2K each: mem, attrib, io (and reserved-for-ide) */
158
159static int bfin_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
160{
161 struct bfin_cf_socket *cf;
162
163 cf = container_of(s, struct bfin_cf_socket, socket);
164 io->flags &= MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT;
165 io->start = cf->phys_cf_io;
166 io->stop = io->start + SZ_2K - 1;
167 return 0;
168}
169
170static int
171bfin_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
172{
173 struct bfin_cf_socket *cf;
174
175 if (map->card_start)
176 return -EINVAL;
177 cf = container_of(s, struct bfin_cf_socket, socket);
178 map->static_start = cf->phys_cf_io;
179 map->flags &= MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT;
180 if (map->flags & MAP_ATTRIB)
181 map->static_start = cf->phys_cf_attr;
182
183 return 0;
184}
185
186static struct pccard_operations bfin_cf_ops = {
187 .init = bfin_cf_ss_init,
188 .suspend = bfin_cf_ss_suspend,
189 .get_status = bfin_cf_get_status,
190 .set_socket = bfin_cf_set_socket,
191 .set_io_map = bfin_cf_set_io_map,
192 .set_mem_map = bfin_cf_set_mem_map,
193};
194
195/*--------------------------------------------------------------------------*/
196
197static int __devinit bfin_cf_probe(struct platform_device *pdev)
198{
199 struct bfin_cf_socket *cf;
200 struct resource *io_mem, *attr_mem;
201 int irq;
202 unsigned short cd_pfx;
203 int status = 0;
204
205 dev_info(&pdev->dev, "Blackfin CompactFlash/PCMCIA Socket Driver\n");
206
207 irq = platform_get_irq(pdev, 0);
208 if (!irq)
209 return -EINVAL;
210
211 cd_pfx = platform_get_irq(pdev, 1); /*Card Detect GPIO PIN */
212
213 if (gpio_request(cd_pfx, "pcmcia: CD")) {
214 dev_err(&pdev->dev,
215 "Failed ro request Card Detect GPIO_%d\n",
216 cd_pfx);
217 return -EBUSY;
218 }
219 gpio_direction_input(cd_pfx);
220
221 cf = kzalloc(sizeof *cf, GFP_KERNEL);
222 if (!cf) {
223 gpio_free(cd_pfx);
224 return -ENOMEM;
225 }
226
227 cf->cd_pfx = cd_pfx;
228
229 setup_timer(&cf->timer, bfin_cf_timer, (unsigned long)cf);
230
231 cf->pdev = pdev;
232 platform_set_drvdata(pdev, cf);
233
234 cf->irq = irq;
235 cf->socket.pci_irq = irq;
236
237 set_irq_type(irq, IRQF_TRIGGER_LOW);
238
239 io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
240 attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
241
242 if (!io_mem || !attr_mem)
243 goto fail0;
244
245 cf->phys_cf_io = io_mem->start;
246 cf->phys_cf_attr = attr_mem->start;
247
248 /* pcmcia layer only remaps "real" memory */
249 cf->socket.io_offset = (unsigned long)
250 ioremap(cf->phys_cf_io, SZ_2K);
251
252 if (!cf->socket.io_offset)
253 goto fail0;
254
255 dev_err(&pdev->dev, ": on irq %d\n", irq);
256
257 dev_dbg(&pdev->dev, ": %s\n",
258 bfin_cf_present(cf->cd_pfx) ? "present" : "(not present)");
259
260 cf->socket.owner = THIS_MODULE;
261 cf->socket.dev.parent = &pdev->dev;
262 cf->socket.ops = &bfin_cf_ops;
263 cf->socket.resource_ops = &pccard_static_ops;
264 cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
265 | SS_CAP_MEM_ALIGN;
266 cf->socket.map_size = SZ_2K;
267
268 status = pcmcia_register_socket(&cf->socket);
269 if (status < 0)
270 goto fail2;
271
272 cf->active = 1;
273 mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
274 return 0;
275
276fail2:
277 iounmap((void __iomem *)cf->socket.io_offset);
278 release_mem_region(cf->phys_cf_io, SZ_8K);
279
280fail0:
281 gpio_free(cf->cd_pfx);
282 kfree(cf);
283 platform_set_drvdata(pdev, NULL);
284
285 return status;
286}
287
288static int __devexit bfin_cf_remove(struct platform_device *pdev)
289{
290 struct bfin_cf_socket *cf = platform_get_drvdata(pdev);
291
292 gpio_free(cf->cd_pfx);
293 cf->active = 0;
294 pcmcia_unregister_socket(&cf->socket);
295 del_timer_sync(&cf->timer);
296 iounmap((void __iomem *)cf->socket.io_offset);
297 release_mem_region(cf->phys_cf_io, SZ_8K);
298 platform_set_drvdata(pdev, NULL);
299 kfree(cf);
300 return 0;
301}
302
303static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
304{
305 return pcmcia_socket_dev_suspend(&pdev->dev, mesg);
306}
307
308static int bfin_cf_resume(struct platform_device *pdev)
309{
310 return pcmcia_socket_dev_resume(&pdev->dev);
311}
312
313static struct platform_driver bfin_cf_driver = {
314 .driver = {
315 .name = (char *)driver_name,
316 .owner = THIS_MODULE,
317 },
318 .probe = bfin_cf_probe,
319 .remove = __devexit_p(bfin_cf_remove),
320 .suspend = bfin_cf_suspend,
321 .resume = bfin_cf_resume,
322};
323
324static int __init bfin_cf_init(void)
325{
326 return platform_driver_register(&bfin_cf_driver);
327}
328
329static void __exit bfin_cf_exit(void)
330{
331 platform_driver_unregister(&bfin_cf_driver);
332}
333
334module_init(bfin_cf_init);
335module_exit(bfin_cf_exit);
336
337MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
338MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
339MODULE_LICENSE("GPL");
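
The new driver is driven entirely by platform resources: bfin_cf_probe() takes the controller IRQ from index 0, reuses IRQ index 1 to carry the card-detect GPIO number, and expects memory windows for I/O and attribute space. A hypothetical board-file sketch of those resources follows; every address, IRQ and GPIO number here is invented and would come from the real board design.

#include <linux/platform_device.h>
#include <linux/ioport.h>

static struct resource bfin_cf_resources[] = {
        { .start = 0x20310000, .end = 0x203107ff, .flags = IORESOURCE_MEM }, /* I/O window */
        { .start = 0x20311800, .end = 0x20311fff, .flags = IORESOURCE_MEM }, /* attribute window */
        { .start = IRQ_PF4,    .end = IRQ_PF4,    .flags = IORESOURCE_IRQ }, /* controller IRQ */
        { .start = GPIO_PF10,  .end = GPIO_PF10,  .flags = IORESOURCE_IRQ }, /* card-detect GPIO, read via platform_get_irq(pdev, 1) */
};

static struct platform_device bfin_cf_device = {
        .name          = "bfin_cf_pcmcia",
        .id            = -1,
        .resource      = bfin_cf_resources,
        .num_resources = ARRAY_SIZE(bfin_cf_resources),
};

/* the board's init code would then call:
 *      platform_device_register(&bfin_cf_device);
 */
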
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index fb2f38dc92c5..911ca0e8dfc2 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -30,11 +30,9 @@
30#include <asm/irq.h> 30#include <asm/irq.h>
31#include <asm/io.h> 31#include <asm/io.h>
32 32
33#define IN_CARD_SERVICES
34#include <pcmcia/cs_types.h> 33#include <pcmcia/cs_types.h>
35#include <pcmcia/ss.h> 34#include <pcmcia/ss.h>
36#include <pcmcia/cs.h> 35#include <pcmcia/cs.h>
37#include <pcmcia/bulkmem.h>
38#include <pcmcia/cistpl.h> 36#include <pcmcia/cistpl.h>
39#include "cs_internal.h" 37#include "cs_internal.h"
40 38
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 36379535f9da..9fcff0c33619 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -30,7 +30,6 @@
30#include <pcmcia/cs_types.h> 30#include <pcmcia/cs_types.h>
31#include <pcmcia/ss.h> 31#include <pcmcia/ss.h>
32#include <pcmcia/cs.h> 32#include <pcmcia/cs.h>
33#include <pcmcia/bulkmem.h>
34#include <pcmcia/cisreg.h> 33#include <pcmcia/cisreg.h>
35#include <pcmcia/cistpl.h> 34#include <pcmcia/cistpl.h>
36#include "cs_internal.h" 35#include "cs_internal.h"
@@ -1439,10 +1438,11 @@ EXPORT_SYMBOL(pccard_read_tuple);
1439 1438
1440======================================================================*/ 1439======================================================================*/
1441 1440
1442int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_t *info) 1441int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned int *info)
1443{ 1442{
1444 tuple_t *tuple; 1443 tuple_t *tuple;
1445 cisparse_t *p; 1444 cisparse_t *p;
1445 unsigned int count = 0;
1446 int ret, reserved, dev_ok = 0, ident_ok = 0; 1446 int ret, reserved, dev_ok = 0, ident_ok = 0;
1447 1447
1448 if (!s) 1448 if (!s)
@@ -1457,7 +1457,7 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_
1457 return CS_OUT_OF_RESOURCE; 1457 return CS_OUT_OF_RESOURCE;
1458 } 1458 }
1459 1459
1460 info->Chains = reserved = 0; 1460 count = reserved = 0;
1461 tuple->DesiredTuple = RETURN_FIRST_TUPLE; 1461 tuple->DesiredTuple = RETURN_FIRST_TUPLE;
1462 tuple->Attributes = TUPLE_RETURN_COMMON; 1462 tuple->Attributes = TUPLE_RETURN_COMMON;
1463 ret = pccard_get_first_tuple(s, function, tuple); 1463 ret = pccard_get_first_tuple(s, function, tuple);
@@ -1482,7 +1482,7 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_
1482 if (!dev_ok && !ident_ok) 1482 if (!dev_ok && !ident_ok)
1483 goto done; 1483 goto done;
1484 1484
1485 for (info->Chains = 1; info->Chains < MAX_TUPLES; info->Chains++) { 1485 for (count = 1; count < MAX_TUPLES; count++) {
1486 ret = pccard_get_next_tuple(s, function, tuple); 1486 ret = pccard_get_next_tuple(s, function, tuple);
1487 if (ret != CS_SUCCESS) break; 1487 if (ret != CS_SUCCESS) break;
1488 if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) || 1488 if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) ||
@@ -1490,11 +1490,13 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_
1490 ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff))) 1490 ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff)))
1491 reserved++; 1491 reserved++;
1492 } 1492 }
1493 if ((info->Chains == MAX_TUPLES) || (reserved > 5) || 1493 if ((count == MAX_TUPLES) || (reserved > 5) ||
1494 ((!dev_ok || !ident_ok) && (info->Chains > 10))) 1494 ((!dev_ok || !ident_ok) && (count > 10)))
1495 info->Chains = 0; 1495 count = 0;
1496 1496
1497done: 1497done:
1498 if (info)
1499 *info = count;
1498 kfree(tuple); 1500 kfree(tuple);
1499 kfree(p); 1501 kfree(p);
1500 return CS_SUCCESS; 1502 return CS_SUCCESS;
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 29276bd28295..d1207393fc3e 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -32,11 +32,9 @@
32#include <asm/system.h> 32#include <asm/system.h>
33#include <asm/irq.h> 33#include <asm/irq.h>
34 34
35#define IN_CARD_SERVICES
36#include <pcmcia/cs_types.h> 35#include <pcmcia/cs_types.h>
37#include <pcmcia/ss.h> 36#include <pcmcia/ss.h>
38#include <pcmcia/cs.h> 37#include <pcmcia/cs.h>
39#include <pcmcia/bulkmem.h>
40#include <pcmcia/cistpl.h> 38#include <pcmcia/cistpl.h>
41#include <pcmcia/cisreg.h> 39#include <pcmcia/cisreg.h>
42#include <pcmcia/ds.h> 40#include <pcmcia/ds.h>
@@ -238,7 +236,6 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
238 236
239 init_completion(&socket->socket_released); 237 init_completion(&socket->socket_released);
240 init_completion(&socket->thread_done); 238 init_completion(&socket->thread_done);
241 init_waitqueue_head(&socket->thread_wait);
242 mutex_init(&socket->skt_mutex); 239 mutex_init(&socket->skt_mutex);
243 spin_lock_init(&socket->thread_lock); 240 spin_lock_init(&socket->thread_lock);
244 241
@@ -278,10 +275,9 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
278 275
279 cs_dbg(socket, 0, "pcmcia_unregister_socket(0x%p)\n", socket->ops); 276 cs_dbg(socket, 0, "pcmcia_unregister_socket(0x%p)\n", socket->ops);
280 277
281 if (socket->thread) { 278 if (socket->thread)
282 wake_up(&socket->thread_wait);
283 kthread_stop(socket->thread); 279 kthread_stop(socket->thread);
284 } 280
285 release_cis_mem(socket); 281 release_cis_mem(socket);
286 282
287 /* remove from our own list */ 283 /* remove from our own list */
@@ -635,7 +631,6 @@ static void socket_detect_change(struct pcmcia_socket *skt)
635static int pccardd(void *__skt) 631static int pccardd(void *__skt)
636{ 632{
637 struct pcmcia_socket *skt = __skt; 633 struct pcmcia_socket *skt = __skt;
638 DECLARE_WAITQUEUE(wait, current);
639 int ret; 634 int ret;
640 635
641 skt->thread = current; 636 skt->thread = current;
@@ -656,7 +651,6 @@ static int pccardd(void *__skt)
656 if (ret) 651 if (ret)
657 dev_warn(&skt->dev, "err %d adding socket attributes\n", ret); 652 dev_warn(&skt->dev, "err %d adding socket attributes\n", ret);
658 653
659 add_wait_queue(&skt->thread_wait, &wait);
660 complete(&skt->thread_done); 654 complete(&skt->thread_done);
661 655
662 set_freezable(); 656 set_freezable();
@@ -694,8 +688,6 @@ static int pccardd(void *__skt)
694 /* make sure we are running before we exit */ 688 /* make sure we are running before we exit */
695 set_current_state(TASK_RUNNING); 689 set_current_state(TASK_RUNNING);
696 690
697 remove_wait_queue(&skt->thread_wait, &wait);
698
699 /* remove from the device core */ 691 /* remove from the device core */
700 pccard_sysfs_remove_socket(&skt->dev); 692 pccard_sysfs_remove_socket(&skt->dev);
701 device_unregister(&skt->dev); 693 device_unregister(&skt->dev);
@@ -716,7 +708,7 @@ void pcmcia_parse_events(struct pcmcia_socket *s, u_int events)
716 s->thread_events |= events; 708 s->thread_events |= events;
717 spin_unlock_irqrestore(&s->thread_lock, flags); 709 spin_unlock_irqrestore(&s->thread_lock, flags);
718 710
719 wake_up(&s->thread_wait); 711 wake_up_process(s->thread);
720 } 712 }
721} /* pcmcia_parse_events */ 713} /* pcmcia_parse_events */
722EXPORT_SYMBOL(pcmcia_parse_events); 714EXPORT_SYMBOL(pcmcia_parse_events);
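
With the socket's private waitqueue removed, pccardd now just marks itself TASK_INTERRUPTIBLE and sleeps in schedule(), and pcmcia_parse_events() pokes the kthread directly via wake_up_process(). A minimal sketch of that wakeup pattern, using illustrative names rather than the pcmcia core's own:

#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *demo_thread;

static int demo_threadfn(void *data)
{
        while (!kthread_should_stop()) {
                /* announce the intent to sleep before checking for work,
                 * so a concurrent wake_up_process() cannot be lost */
                set_current_state(TASK_INTERRUPTIBLE);
                /* ... drain any queued events here ... */
                schedule();     /* sleeps until woken */
        }
        return 0;
}

static int __init demo_init(void)
{
        demo_thread = kthread_run(demo_threadfn, NULL, "demo_pccardd");
        if (IS_ERR(demo_thread))
                return PTR_ERR(demo_thread);
        return 0;
}

/* event producer: no waitqueue needed, just wake the thread */
static void demo_post_event(void)
{
        wake_up_process(demo_thread);
}
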
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index e7d5d141f24d..63dc1a28bda2 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -26,18 +26,6 @@
26#define CLIENT_WIN_REQ(i) (0x1<<(i)) 26#define CLIENT_WIN_REQ(i) (0x1<<(i))
27#define CLIENT_CARDBUS 0x8000 27#define CLIENT_CARDBUS 0x8000
28 28
29#define REGION_MAGIC 0xE3C9
30typedef struct region_t {
31 u_short region_magic;
32 u_short state;
33 dev_info_t dev_info;
34 struct pcmcia_device *mtd;
35 u_int MediaID;
36 region_info_t info;
37} region_t;
38
39#define REGION_STALE 0x01
40
41/* Each card function gets one of these guys */ 29/* Each card function gets one of these guys */
42typedef struct config_t { 30typedef struct config_t {
43 struct kref ref; 31 struct kref ref;
@@ -130,7 +118,6 @@ extern struct list_head pcmcia_socket_list;
130int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle, int idx, win_req_t *req); 118int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle, int idx, win_req_t *req);
131int pccard_get_configuration_info(struct pcmcia_socket *s, struct pcmcia_device *p_dev, config_info_t *config); 119int pccard_get_configuration_info(struct pcmcia_socket *s, struct pcmcia_device *p_dev, config_info_t *config);
132int pccard_reset_card(struct pcmcia_socket *skt); 120int pccard_reset_card(struct pcmcia_socket *skt);
133int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev, cs_status_t *status);
134 121
135 122
136struct pcmcia_callback{ 123struct pcmcia_callback{
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index e40775443d04..4174d9656e35 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -25,7 +25,6 @@
25#include <linux/kref.h> 25#include <linux/kref.h>
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27 27
28#define IN_CARD_SERVICES
29#include <pcmcia/cs_types.h> 28#include <pcmcia/cs_types.h>
30#include <pcmcia/cs.h> 29#include <pcmcia/cs.h>
31#include <pcmcia/cistpl.h> 30#include <pcmcia/cistpl.h>
@@ -741,9 +740,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
741 740
742static int pcmcia_card_add(struct pcmcia_socket *s) 741static int pcmcia_card_add(struct pcmcia_socket *s)
743{ 742{
744 cisinfo_t cisinfo;
745 cistpl_longlink_mfc_t mfc; 743 cistpl_longlink_mfc_t mfc;
746 unsigned int no_funcs, i; 744 unsigned int no_funcs, i, no_chains;
747 int ret = 0; 745 int ret = 0;
748 746
749 if (!(s->resource_setup_done)) { 747 if (!(s->resource_setup_done)) {
@@ -757,8 +755,8 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
757 return -EAGAIN; /* try again, but later... */ 755 return -EAGAIN; /* try again, but later... */
758 } 756 }
759 757
760 ret = pccard_validate_cis(s, BIND_FN_ALL, &cisinfo); 758 ret = pccard_validate_cis(s, BIND_FN_ALL, &no_chains);
761 if (ret || !cisinfo.Chains) { 759 if (ret || !no_chains) {
762 ds_dbg(0, "invalid CIS or invalid resources\n"); 760 ds_dbg(0, "invalid CIS or invalid resources\n");
763 return -ENODEV; 761 return -ENODEV;
764 } 762 }
@@ -852,7 +850,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
852{ 850{
853 struct pcmcia_socket *s = dev->socket; 851 struct pcmcia_socket *s = dev->socket;
854 const struct firmware *fw; 852 const struct firmware *fw;
855 char path[20]; 853 char path[FIRMWARE_NAME_MAX];
856 int ret = -ENOMEM; 854 int ret = -ENOMEM;
857 int no_funcs; 855 int no_funcs;
858 int old_funcs; 856 int old_funcs;
@@ -864,7 +862,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
864 862
865 ds_dbg(1, "trying to load CIS file %s\n", filename); 863 ds_dbg(1, "trying to load CIS file %s\n", filename);
866 864
867 if (strlen(filename) > 14) { 865 if (strlen(filename) > (FIRMWARE_NAME_MAX - 1)) {
868 printk(KERN_WARNING "pcmcia: CIS filename is too long [%s]\n", 866 printk(KERN_WARNING "pcmcia: CIS filename is too long [%s]\n",
869 filename); 867 filename);
870 return -EINVAL; 868 return -EINVAL;
diff --git a/drivers/pcmcia/hd64465_ss.c b/drivers/pcmcia/hd64465_ss.c
index f2e810f53c81..fb2bc1fb015d 100644
--- a/drivers/pcmcia/hd64465_ss.c
+++ b/drivers/pcmcia/hd64465_ss.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: hd64465_ss.c,v 1.7 2003/07/06 14:42:50 lethal Exp $
3 *
4 * Device driver for the PCMCIA controller module of the 2 * Device driver for the PCMCIA controller module of the
5 * Hitachi HD64465 handheld companion chip. 3 * Hitachi HD64465 handheld companion chip.
6 * 4 *
@@ -48,7 +46,6 @@
48#include <pcmcia/cistpl.h> 46#include <pcmcia/cistpl.h>
49#include <pcmcia/ds.h> 47#include <pcmcia/ds.h>
50#include <pcmcia/ss.h> 48#include <pcmcia/ss.h>
51#include <pcmcia/bulkmem.h>
52#include "cs_internal.h" 49#include "cs_internal.h"
53 50
54#define MODNAME "hd64465_ss" 51#define MODNAME "hd64465_ss"
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index e13618656ff7..46561face128 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Author: Arjan Van De Ven <arjanv@redhat.com> 6 * Author: Arjan Van De Ven <arjanv@redhat.com>
7 * Loosly based on i82365.c from the pcmcia-cs package 7 * Loosly based on i82365.c from the pcmcia-cs package
8 *
9 * $Id: i82092aa.c,v 1.2 2001/10/23 14:43:34 arjanv Exp $
10 */ 8 */
11 9
12#include <linux/kernel.h> 10#include <linux/kernel.h>
diff --git a/drivers/pcmcia/i82092aa.h b/drivers/pcmcia/i82092aa.h
index b0d453303c5d..8836d393ad02 100644
--- a/drivers/pcmcia/i82092aa.h
+++ b/drivers/pcmcia/i82092aa.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/interrupt.h> 4#include <linux/interrupt.h>
5 5
6/* $Id: i82092aa.h,v 1.1.1.1 2001/09/19 14:53:15 dwmw2 Exp $ */
7
8/* Debuging defines */ 6/* Debuging defines */
9#ifdef NOTRACE 7#ifdef NOTRACE
10#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) 8#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 32a2ab119798..68f6b2702bc4 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1263,7 +1263,7 @@ static int __init init_i82365(void)
1263 1263
1264 ret = driver_register(&i82365_driver); 1264 ret = driver_register(&i82365_driver);
1265 if (ret) 1265 if (ret)
1266 return ret; 1266 goto err_out;
1267 1267
1268 i82365_device = platform_device_alloc("i82365", 0); 1268 i82365_device = platform_device_alloc("i82365", 0);
1269 if (i82365_device) { 1269 if (i82365_device) {
@@ -1273,10 +1273,8 @@ static int __init init_i82365(void)
1273 } else 1273 } else
1274 ret = -ENOMEM; 1274 ret = -ENOMEM;
1275 1275
1276 if (ret) { 1276 if (ret)
1277 driver_unregister(&i82365_driver); 1277 goto err_driver_unregister;
1278 return ret;
1279 }
1280 1278
1281 printk(KERN_INFO "Intel ISA PCIC probe: "); 1279 printk(KERN_INFO "Intel ISA PCIC probe: ");
1282 sockets = 0; 1280 sockets = 0;
@@ -1285,16 +1283,17 @@ static int __init init_i82365(void)
1285 1283
1286 if (sockets == 0) { 1284 if (sockets == 0) {
1287 printk("not found.\n"); 1285 printk("not found.\n");
1288 platform_device_unregister(i82365_device); 1286 ret = -ENODEV;
1289 release_region(i365_base, 2); 1287 goto err_dev_unregister;
1290 driver_unregister(&i82365_driver);
1291 return -ENODEV;
1292 } 1288 }
1293 1289
1294 /* Set up interrupt handler(s) */ 1290 /* Set up interrupt handler(s) */
1295 if (grab_irq != 0) 1291 if (grab_irq != 0)
1296 request_irq(cs_irq, pcic_interrupt, 0, "i82365", pcic_interrupt); 1292 ret = request_irq(cs_irq, pcic_interrupt, 0, "i82365", pcic_interrupt);
1297 1293
1294 if (ret)
1295 goto err_socket_release;
1296
1298 /* register sockets with the pcmcia core */ 1297 /* register sockets with the pcmcia core */
1299 for (i = 0; i < sockets; i++) { 1298 for (i = 0; i < sockets; i++) {
1300 socket[i].socket.dev.parent = &i82365_device->dev; 1299 socket[i].socket.dev.parent = &i82365_device->dev;
@@ -1324,7 +1323,23 @@ static int __init init_i82365(void)
1324 } 1323 }
1325 1324
1326 return 0; 1325 return 0;
1327 1326err_socket_release:
1327 for (i = 0; i < sockets; i++) {
1328 /* Turn off all interrupt sources! */
1329 i365_set(i, I365_CSCINT, 0);
1330 release_region(socket[i].ioaddr, 2);
1331 }
1332err_dev_unregister:
1333 platform_device_unregister(i82365_device);
1334 release_region(i365_base, 2);
1335#ifdef CONFIG_PNP
1336 if (i82365_pnpdev)
1337 pnp_disable_dev(i82365_pnpdev);
1338#endif
1339err_driver_unregister:
1340 driver_unregister(&i82365_driver);
1341err_out:
1342 return ret;
1328} /* init_i82365 */ 1343} /* init_i82365 */
1329 1344
1330static void __exit exit_i82365(void) 1345static void __exit exit_i82365(void)
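
The init_i82365() rework above replaces open-coded unwinding with a chain of goto labels, each label undoing exactly the steps that had succeeded before the failure. A standalone sketch of that layered-cleanup shape (steps and names invented for the example):

#include <stdio.h>
#include <stdlib.h>

static int step(const char *name, int ok)
{
        printf("%s: %s\n", name, ok ? "ok" : "failed");
        return ok ? 0 : -1;
}

int main(void)
{
        int ret;

        ret = step("register driver", 1);
        if (ret)
                goto err_out;
        ret = step("add device", 1);
        if (ret)
                goto err_driver_unregister;
        ret = step("probe sockets", 0);         /* simulate a failure */
        if (ret)
                goto err_dev_unregister;
        return 0;

err_dev_unregister:
        puts("undo: unregister device");
err_driver_unregister:
        puts("undo: unregister driver");
err_out:
        return EXIT_FAILURE;
}
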
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 9c5be9a2f3fd..ff66604e90d4 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * m8xx_pcmcia.c - Linux PCMCIA socket driver for the mpc8xx series. 2 * m8xx_pcmcia.c - Linux PCMCIA socket driver for the mpc8xx series.
3 * 3 *
4 * (C) 1999-2000 Magnus Damm <damm@bitsmart.com> 4 * (C) 1999-2000 Magnus Damm <damm@opensource.se>
5 * (C) 2001-2002 Montavista Software, Inc. 5 * (C) 2001-2002 Montavista Software, Inc.
6 * <mlocke@mvista.com> 6 * <mlocke@mvista.com>
7 * 7 *
@@ -60,7 +60,6 @@
60#include <asm/irq.h> 60#include <asm/irq.h>
61#include <asm/fs_pd.h> 61#include <asm/fs_pd.h>
62 62
63#include <pcmcia/version.h>
64#include <pcmcia/cs_types.h> 63#include <pcmcia/cs_types.h>
65#include <pcmcia/cs.h> 64#include <pcmcia/cs.h>
66#include <pcmcia/ss.h> 65#include <pcmcia/ss.h>
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 46314b420765..569b746b5731 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -38,19 +38,19 @@
38#define CF_BASE 0xfffe2800 38#define CF_BASE 0xfffe2800
39 39
40/* status; read after IRQ */ 40/* status; read after IRQ */
41#define CF_STATUS_REG __REG16(CF_BASE + 0x00) 41#define CF_STATUS (CF_BASE + 0x00)
42# define CF_STATUS_BAD_READ (1 << 2) 42# define CF_STATUS_BAD_READ (1 << 2)
43# define CF_STATUS_BAD_WRITE (1 << 1) 43# define CF_STATUS_BAD_WRITE (1 << 1)
44# define CF_STATUS_CARD_DETECT (1 << 0) 44# define CF_STATUS_CARD_DETECT (1 << 0)
45 45
46/* which chipselect (CS0..CS3) is used for CF (active low) */ 46/* which chipselect (CS0..CS3) is used for CF (active low) */
47#define CF_CFG_REG __REG16(CF_BASE + 0x02) 47#define CF_CFG (CF_BASE + 0x02)
48 48
49/* card reset */ 49/* card reset */
50#define CF_CONTROL_REG __REG16(CF_BASE + 0x04) 50#define CF_CONTROL (CF_BASE + 0x04)
51# define CF_CONTROL_RESET (1 << 0) 51# define CF_CONTROL_RESET (1 << 0)
52 52
53#define omap_cf_present() (!(CF_STATUS_REG & CF_STATUS_CARD_DETECT)) 53#define omap_cf_present() (!(omap_readw(CF_STATUS) & CF_STATUS_CARD_DETECT))
54 54
55/*--------------------------------------------------------------------------*/ 55/*--------------------------------------------------------------------------*/
56 56
@@ -139,11 +139,11 @@ omap_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
139 return -EINVAL; 139 return -EINVAL;
140 } 140 }
141 141
142 control = CF_CONTROL_REG; 142 control = omap_readw(CF_CONTROL);
143 if (s->flags & SS_RESET) 143 if (s->flags & SS_RESET)
144 CF_CONTROL_REG = CF_CONTROL_RESET; 144 omap_writew(CF_CONTROL_RESET, CF_CONTROL);
145 else 145 else
146 CF_CONTROL_REG = 0; 146 omap_writew(0, CF_CONTROL);
147 147
148 pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n", 148 pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n",
149 driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask); 149 driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask);
@@ -270,7 +270,7 @@ static int __init omap_cf_probe(struct platform_device *pdev)
270 omap_cfg_reg(V10_1610_CF_IREQ); 270 omap_cfg_reg(V10_1610_CF_IREQ);
271 omap_cfg_reg(W10_1610_CF_RESET); 271 omap_cfg_reg(W10_1610_CF_RESET);
272 272
273 CF_CFG_REG = ~(1 << seg); 273 omap_writew(~(1 << seg), CF_CFG);
274 274
275 pr_info("%s: cs%d on irq %d\n", driver_name, seg, irq); 275 pr_info("%s: cs%d on irq %d\n", driver_name, seg, irq);
276 276
@@ -279,14 +279,15 @@ static int __init omap_cf_probe(struct platform_device *pdev)
279 * CF/PCMCIA variants... 279 * CF/PCMCIA variants...
280 */ 280 */
281 pr_debug("%s: cs%d, previous ccs %08x acs %08x\n", driver_name, 281 pr_debug("%s: cs%d, previous ccs %08x acs %08x\n", driver_name,
282 seg, EMIFS_CCS(seg), EMIFS_ACS(seg)); 282 seg, omap_readl(EMIFS_CCS(seg)), omap_readl(EMIFS_ACS(seg)));
283 EMIFS_CCS(seg) = 0x0004a1b3; /* synch mode 4 etc */ 283 omap_writel(0x0004a1b3, EMIFS_CCS(seg)); /* synch mode 4 etc */
284 EMIFS_ACS(seg) = 0x00000000; /* OE hold/setup */ 284 omap_writel(0x00000000, EMIFS_ACS(seg)); /* OE hold/setup */
285 285
286 /* CF uses armxor_ck, which is "always" available */ 286 /* CF uses armxor_ck, which is "always" available */
287 287
288 pr_debug("%s: sts %04x cfg %04x control %04x %s\n", driver_name, 288 pr_debug("%s: sts %04x cfg %04x control %04x %s\n", driver_name,
289 CF_STATUS_REG, CF_CFG_REG, CF_CONTROL_REG, 289 omap_readw(CF_STATUS), omap_readw(CF_CFG),
290 omap_readw(CF_CONTROL),
290 omap_cf_present() ? "present" : "(not present)"); 291 omap_cf_present() ? "present" : "(not present)");
291 292
292 cf->socket.owner = THIS_MODULE; 293 cf->socket.owner = THIS_MODULE;
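
omap_cf.c stops dereferencing registers through __REG16() and instead hands plain addresses to omap_readw()/omap_writew(). The standalone sketch below mimics that accessor style against a fake register file just to show the read-modify-write flow; the helper names and register layout are invented.

#include <stdint.h>
#include <stdio.h>

static uint16_t fake_regs[4];           /* stands in for memory-mapped I/O */

static uint16_t demo_readw(uintptr_t addr)
{
        return fake_regs[addr & 3];
}

static void demo_writew(uint16_t val, uintptr_t addr)
{
        fake_regs[addr & 3] = val;
}

#define CF_CONTROL        0x2           /* register offset, illustrative only */
#define CF_CONTROL_RESET  (1 << 0)

int main(void)
{
        uint16_t control = demo_readw(CF_CONTROL);      /* read-modify-write */

        demo_writew(control | CF_CONTROL_RESET, CF_CONTROL);
        printf("control = %#x\n", demo_readw(CF_CONTROL));
        return 0;
}
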
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 5f186abca108..419f97fc9a62 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -27,12 +27,13 @@
27#include <linux/proc_fs.h> 27#include <linux/proc_fs.h>
28#include <linux/poll.h> 28#include <linux/poll.h>
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/smp_lock.h>
30#include <linux/workqueue.h> 31#include <linux/workqueue.h>
31 32
32#define IN_CARD_SERVICES
33#include <pcmcia/cs_types.h> 33#include <pcmcia/cs_types.h>
34#include <pcmcia/cs.h> 34#include <pcmcia/cs.h>
35#include <pcmcia/cistpl.h> 35#include <pcmcia/cistpl.h>
36#include <pcmcia/cisreg.h>
36#include <pcmcia/ds.h> 37#include <pcmcia/ds.h>
37#include <pcmcia/ss.h> 38#include <pcmcia/ss.h>
38 39
@@ -138,6 +139,154 @@ static int proc_read_drivers(char *buf, char **start, off_t pos,
138} 139}
139#endif 140#endif
140 141
142
143#ifdef CONFIG_PCMCIA_PROBE
144
145static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
146{
147 int irq;
148 u32 mask;
149
150 irq = adj->resource.irq.IRQ;
151 if ((irq < 0) || (irq > 15))
152 return CS_BAD_IRQ;
153
154 if (adj->Action != REMOVE_MANAGED_RESOURCE)
155 return 0;
156
157 mask = 1 << irq;
158
159 if (!(s->irq_mask & mask))
160 return 0;
161
162 s->irq_mask &= ~mask;
163
164 return 0;
165}
166
167#else
168
169static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) {
170 return CS_SUCCESS;
171}
172
173#endif
174
175static int pcmcia_adjust_resource_info(adjust_t *adj)
176{
177 struct pcmcia_socket *s;
178 int ret = CS_UNSUPPORTED_FUNCTION;
179 unsigned long flags;
180
181 down_read(&pcmcia_socket_list_rwsem);
182 list_for_each_entry(s, &pcmcia_socket_list, socket_list) {
183
184 if (adj->Resource == RES_IRQ)
185 ret = adjust_irq(s, adj);
186
187 else if (s->resource_ops->add_io) {
188 unsigned long begin, end;
189
190 /* you can't use the old interface if the new
191 * one was used before */
192 spin_lock_irqsave(&s->lock, flags);
193 if ((s->resource_setup_new) &&
194 !(s->resource_setup_old)) {
195 spin_unlock_irqrestore(&s->lock, flags);
196 continue;
197 } else if (!(s->resource_setup_old))
198 s->resource_setup_old = 1;
199 spin_unlock_irqrestore(&s->lock, flags);
200
201 switch (adj->Resource) {
202 case RES_MEMORY_RANGE:
203 begin = adj->resource.memory.Base;
204 end = adj->resource.memory.Base + adj->resource.memory.Size - 1;
205 if (s->resource_ops->add_mem)
206 ret =s->resource_ops->add_mem(s, adj->Action, begin, end);
207 case RES_IO_RANGE:
208 begin = adj->resource.io.BasePort;
209 end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1;
210 if (s->resource_ops->add_io)
211 ret = s->resource_ops->add_io(s, adj->Action, begin, end);
212 }
213 if (!ret) {
214 /* as there's no way we know this is the
215 * last call to adjust_resource_info, we
216 * always need to assume this is the latest
217 * one... */
218 spin_lock_irqsave(&s->lock, flags);
219 s->resource_setup_done = 1;
220 spin_unlock_irqrestore(&s->lock, flags);
221 }
222 }
223 }
224 up_read(&pcmcia_socket_list_rwsem);
225
226 return (ret);
227}
228
229/** pccard_get_status
230 *
231 * Get the current socket state bits. We don't support the latched
232 * SocketState yet: I haven't seen any point for it.
233 */
234
235static int pccard_get_status(struct pcmcia_socket *s,
236 struct pcmcia_device *p_dev,
237 cs_status_t *status)
238{
239 config_t *c;
240 int val;
241
242 s->ops->get_status(s, &val);
243 status->CardState = status->SocketState = 0;
244 status->CardState |= (val & SS_DETECT) ? CS_EVENT_CARD_DETECT : 0;
245 status->CardState |= (val & SS_CARDBUS) ? CS_EVENT_CB_DETECT : 0;
246 status->CardState |= (val & SS_3VCARD) ? CS_EVENT_3VCARD : 0;
247 status->CardState |= (val & SS_XVCARD) ? CS_EVENT_XVCARD : 0;
248 if (s->state & SOCKET_SUSPEND)
249 status->CardState |= CS_EVENT_PM_SUSPEND;
250 if (!(s->state & SOCKET_PRESENT))
251 return CS_NO_CARD;
252
253 c = (p_dev) ? p_dev->function_config : NULL;
254
255 if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
256 (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
257 u_char reg;
258 if (c->CardValues & PRESENT_PIN_REPLACE) {
259 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
260 status->CardState |=
261 (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
262 status->CardState |=
263 (reg & PRR_READY_STATUS) ? CS_EVENT_READY_CHANGE : 0;
264 status->CardState |=
265 (reg & PRR_BVD2_STATUS) ? CS_EVENT_BATTERY_LOW : 0;
266 status->CardState |=
267 (reg & PRR_BVD1_STATUS) ? CS_EVENT_BATTERY_DEAD : 0;
268 } else {
269 /* No PRR? Then assume we're always ready */
270 status->CardState |= CS_EVENT_READY_CHANGE;
271 }
272 if (c->CardValues & PRESENT_EXT_STATUS) {
273 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
274 status->CardState |=
275 (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
276 }
277 return CS_SUCCESS;
278 }
279 status->CardState |=
280 (val & SS_WRPROT) ? CS_EVENT_WRITE_PROTECT : 0;
281 status->CardState |=
282 (val & SS_BATDEAD) ? CS_EVENT_BATTERY_DEAD : 0;
283 status->CardState |=
284 (val & SS_BATWARN) ? CS_EVENT_BATTERY_LOW : 0;
285 status->CardState |=
286 (val & SS_READY) ? CS_EVENT_READY_CHANGE : 0;
287 return CS_SUCCESS;
288} /* pccard_get_status */
289
141/*====================================================================== 290/*======================================================================
142 291
143 These manage a ring buffer of events pending for one user process 292 These manage a ring buffer of events pending for one user process
@@ -397,20 +546,27 @@ static int ds_open(struct inode *inode, struct file *file)
397 struct pcmcia_socket *s; 546 struct pcmcia_socket *s;
398 user_info_t *user; 547 user_info_t *user;
399 static int warning_printed = 0; 548 static int warning_printed = 0;
549 int ret = 0;
400 550
401 ds_dbg(0, "ds_open(socket %d)\n", i); 551 ds_dbg(0, "ds_open(socket %d)\n", i);
402 552
553 lock_kernel();
403 s = pcmcia_get_socket_by_nr(i); 554 s = pcmcia_get_socket_by_nr(i);
404 if (!s) 555 if (!s) {
405 return -ENODEV; 556 ret = -ENODEV;
557 goto out;
558 }
406 s = pcmcia_get_socket(s); 559 s = pcmcia_get_socket(s);
407 if (!s) 560 if (!s) {
408 return -ENODEV; 561 ret = -ENODEV;
562 goto out;
563 }
409 564
410 if ((file->f_flags & O_ACCMODE) != O_RDONLY) { 565 if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
411 if (s->pcmcia_state.busy) { 566 if (s->pcmcia_state.busy) {
412 pcmcia_put_socket(s); 567 pcmcia_put_socket(s);
413 return -EBUSY; 568 ret = -EBUSY;
569 goto out;
414 } 570 }
415 else 571 else
416 s->pcmcia_state.busy = 1; 572 s->pcmcia_state.busy = 1;
@@ -419,7 +575,8 @@ static int ds_open(struct inode *inode, struct file *file)
419 user = kmalloc(sizeof(user_info_t), GFP_KERNEL); 575 user = kmalloc(sizeof(user_info_t), GFP_KERNEL);
420 if (!user) { 576 if (!user) {
421 pcmcia_put_socket(s); 577 pcmcia_put_socket(s);
422 return -ENOMEM; 578 ret = -ENOMEM;
579 goto out;
423 } 580 }
424 user->event_tail = user->event_head = 0; 581 user->event_tail = user->event_head = 0;
425 user->next = s->user; 582 user->next = s->user;
@@ -441,7 +598,9 @@ static int ds_open(struct inode *inode, struct file *file)
441 598
442 if (s->pcmcia_state.present) 599 if (s->pcmcia_state.present)
443 queue_event(user, CS_EVENT_CARD_INSERTION); 600 queue_event(user, CS_EVENT_CARD_INSERTION);
444 return 0; 601out:
602 unlock_kernel();
603 return ret;
445} /* ds_open */ 604} /* ds_open */
446 605
447/*====================================================================*/ 606/*====================================================================*/
@@ -546,8 +705,6 @@ static u_int ds_poll(struct file *file, poll_table *wait)
546 705
547/*====================================================================*/ 706/*====================================================================*/
548 707
549extern int pcmcia_adjust_resource_info(adjust_t *adj);
550
551static int ds_ioctl(struct inode * inode, struct file * file, 708static int ds_ioctl(struct inode * inode, struct file * file,
552 u_int cmd, u_long arg) 709 u_int cmd, u_long arg)
553{ 710{
@@ -649,7 +806,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
649 mutex_lock(&s->skt_mutex); 806 mutex_lock(&s->skt_mutex);
650 pcmcia_validate_mem(s); 807 pcmcia_validate_mem(s);
651 mutex_unlock(&s->skt_mutex); 808 mutex_unlock(&s->skt_mutex);
652 ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo); 809 ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo.Chains);
653 break; 810 break;
654 case DS_SUSPEND_CARD: 811 case DS_SUSPEND_CARD:
655 ret = pcmcia_suspend_card(s); 812 ret = pcmcia_suspend_card(s);
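
ds_open() above is converted to the pushed-down big-kernel-lock style: lock_kernel() is taken once at entry, every failure path sets ret and jumps to a single out label, and unlock_kernel() runs exactly once on the way back to the caller. A standalone analogue of that single-exit locking shape, using a pthread mutex purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_open(int busy)
{
        int ret = 0;

        pthread_mutex_lock(&demo_lock);
        if (busy) {
                ret = -EBUSY;           /* error path still exits via "out" */
                goto out;
        }
        puts("opened");
out:
        pthread_mutex_unlock(&demo_lock);
        return ret;
}

int main(void)
{
        printf("first open:  %d\n", demo_open(0));
        printf("second open: %d\n", demo_open(1));
        return 0;
}
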
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 1d128fbd1a92..4884a18cf9e6 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -21,11 +21,9 @@
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/device.h> 22#include <linux/device.h>
23 23
24#define IN_CARD_SERVICES
25#include <pcmcia/cs_types.h> 24#include <pcmcia/cs_types.h>
26#include <pcmcia/ss.h> 25#include <pcmcia/ss.h>
27#include <pcmcia/cs.h> 26#include <pcmcia/cs.h>
28#include <pcmcia/bulkmem.h>
29#include <pcmcia/cistpl.h> 27#include <pcmcia/cistpl.h>
30#include <pcmcia/cisreg.h> 28#include <pcmcia/cisreg.h>
31#include <pcmcia/ds.h> 29#include <pcmcia/ds.h>
@@ -311,74 +309,6 @@ int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle,
311EXPORT_SYMBOL(pcmcia_get_window); 309EXPORT_SYMBOL(pcmcia_get_window);
312 310
313 311
314/** pccard_get_status
315 *
316 * Get the current socket state bits. We don't support the latched
317 * SocketState yet: I haven't seen any point for it.
318 */
319
320int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev,
321 cs_status_t *status)
322{
323 config_t *c;
324 int val;
325
326 s->ops->get_status(s, &val);
327 status->CardState = status->SocketState = 0;
328 status->CardState |= (val & SS_DETECT) ? CS_EVENT_CARD_DETECT : 0;
329 status->CardState |= (val & SS_CARDBUS) ? CS_EVENT_CB_DETECT : 0;
330 status->CardState |= (val & SS_3VCARD) ? CS_EVENT_3VCARD : 0;
331 status->CardState |= (val & SS_XVCARD) ? CS_EVENT_XVCARD : 0;
332 if (s->state & SOCKET_SUSPEND)
333 status->CardState |= CS_EVENT_PM_SUSPEND;
334 if (!(s->state & SOCKET_PRESENT))
335 return CS_NO_CARD;
336
337 c = (p_dev) ? p_dev->function_config : NULL;
338
339 if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
340 (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
341 u_char reg;
342 if (c->CardValues & PRESENT_PIN_REPLACE) {
343 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
344 status->CardState |=
345 (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
346 status->CardState |=
347 (reg & PRR_READY_STATUS) ? CS_EVENT_READY_CHANGE : 0;
348 status->CardState |=
349 (reg & PRR_BVD2_STATUS) ? CS_EVENT_BATTERY_LOW : 0;
350 status->CardState |=
351 (reg & PRR_BVD1_STATUS) ? CS_EVENT_BATTERY_DEAD : 0;
352 } else {
353 /* No PRR? Then assume we're always ready */
354 status->CardState |= CS_EVENT_READY_CHANGE;
355 }
356 if (c->CardValues & PRESENT_EXT_STATUS) {
357 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
358 status->CardState |=
359 (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
360 }
361 return CS_SUCCESS;
362 }
363 status->CardState |=
364 (val & SS_WRPROT) ? CS_EVENT_WRITE_PROTECT : 0;
365 status->CardState |=
366 (val & SS_BATDEAD) ? CS_EVENT_BATTERY_DEAD : 0;
367 status->CardState |=
368 (val & SS_BATWARN) ? CS_EVENT_BATTERY_LOW : 0;
369 status->CardState |=
370 (val & SS_READY) ? CS_EVENT_READY_CHANGE : 0;
371 return CS_SUCCESS;
372} /* pccard_get_status */
373
374int pcmcia_get_status(struct pcmcia_device *p_dev, cs_status_t *status)
375{
376 return pccard_get_status(p_dev->socket, p_dev, status);
377}
378EXPORT_SYMBOL(pcmcia_get_status);
379
380
381
382/** pcmcia_get_mem_page 312/** pcmcia_get_mem_page
383 * 313 *
384 * Change the card address of an already open memory window. 314 * Change the card address of an already open memory window.
@@ -812,6 +742,15 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
812 type = IRQF_SHARED; 742 type = IRQF_SHARED;
813 743
814#ifdef CONFIG_PCMCIA_PROBE 744#ifdef CONFIG_PCMCIA_PROBE
745
746#ifdef IRQ_NOAUTOEN
747 /* if the underlying IRQ infrastructure allows for it, only allocate
748 * the IRQ, but do not enable it
749 */
750 if (!(req->Attributes & IRQ_HANDLE_PRESENT))
751 type |= IRQ_NOAUTOEN;
752#endif /* IRQ_NOAUTOEN */
753
815 if (s->irq.AssignedIRQ != 0) { 754 if (s->irq.AssignedIRQ != 0) {
816 /* If the interrupt is already assigned, it must be the same */ 755 /* If the interrupt is already assigned, it must be the same */
817 irq = s->irq.AssignedIRQ; 756 irq = s->irq.AssignedIRQ;
@@ -966,7 +905,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev) {
966 pcmcia_release_configuration(p_dev); 905 pcmcia_release_configuration(p_dev);
967 pcmcia_release_io(p_dev, &p_dev->io); 906 pcmcia_release_io(p_dev, &p_dev->io);
968 pcmcia_release_irq(p_dev, &p_dev->irq); 907 pcmcia_release_irq(p_dev, &p_dev->irq);
969 if (&p_dev->win) 908 if (p_dev->win)
970 pcmcia_release_window(p_dev->win); 909 pcmcia_release_window(p_dev->win);
971} 910}
972EXPORT_SYMBOL(pcmcia_disable_device); 911EXPORT_SYMBOL(pcmcia_disable_device);
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 9414163c78e7..ccfdf1969a7f 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -33,7 +33,6 @@
33 33
34#include <pcmcia/cs_types.h> 34#include <pcmcia/cs_types.h>
35#include <pcmcia/ss.h> 35#include <pcmcia/ss.h>
36#include <pcmcia/bulkmem.h>
37#include <pcmcia/cistpl.h> 36#include <pcmcia/cistpl.h>
38 37
39#include "cs_internal.h" 38#include "cs_internal.h"
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index e7ab060ff118..f123fce65f2e 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -18,6 +18,7 @@
18 18
19#include <pcmcia/ss.h> 19#include <pcmcia/ss.h>
20#include <asm/hardware.h> 20#include <asm/hardware.h>
21#include <asm/mach-types.h>
21 22
22#include <asm/arch/pxa-regs.h> 23#include <asm/arch/pxa-regs.h>
23#include <asm/arch/pxa2xx-gpio.h> 24#include <asm/arch/pxa2xx-gpio.h>
@@ -130,7 +131,7 @@ static void cmx270_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
130} 131}
131 132
132 133
133static struct pcmcia_low_level cmx270_pcmcia_ops = { 134static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
134 .owner = THIS_MODULE, 135 .owner = THIS_MODULE,
135 .hw_init = cmx270_pcmcia_hw_init, 136 .hw_init = cmx270_pcmcia_hw_init,
136 .hw_shutdown = cmx270_pcmcia_shutdown, 137 .hw_shutdown = cmx270_pcmcia_shutdown,
@@ -147,15 +148,21 @@ static int __init cmx270_pcmcia_init(void)
147{ 148{
148 int ret; 149 int ret;
149 150
151 if (!machine_is_armcore())
152 return -ENODEV;
153
150 cmx270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 154 cmx270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
151 155
152 if (!cmx270_pcmcia_device) 156 if (!cmx270_pcmcia_device)
153 return -ENOMEM; 157 return -ENOMEM;
154 158
155 cmx270_pcmcia_device->dev.platform_data = &cmx270_pcmcia_ops; 159 ret = platform_device_add_data(cmx270_pcmcia_device, &cmx270_pcmcia_ops,
160 sizeof(cmx270_pcmcia_ops));
156 161
157 printk(KERN_INFO "Registering cm-x270 PCMCIA interface.\n"); 162 if (ret == 0) {
158 ret = platform_device_add(cmx270_pcmcia_device); 163 printk(KERN_INFO "Registering cm-x270 PCMCIA interface.\n");
164 ret = platform_device_add(cmx270_pcmcia_device);
165 }
159 166
160 if (ret) 167 if (ret)
161 platform_device_put(cmx270_pcmcia_device); 168 platform_device_put(cmx270_pcmcia_device);
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 145b85e0f02c..92d1cc33808c 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -22,6 +22,7 @@
22#include <pcmcia/ss.h> 22#include <pcmcia/ss.h>
23 23
24#include <asm/hardware.h> 24#include <asm/hardware.h>
25#include <asm/mach-types.h>
25#include <asm/irq.h> 26#include <asm/irq.h>
26 27
27#include <asm/arch/pxa-regs.h> 28#include <asm/arch/pxa-regs.h>
@@ -136,7 +137,7 @@ static void mst_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
136{ 137{
137} 138}
138 139
139static struct pcmcia_low_level mst_pcmcia_ops = { 140static struct pcmcia_low_level mst_pcmcia_ops __initdata = {
140 .owner = THIS_MODULE, 141 .owner = THIS_MODULE,
141 .hw_init = mst_pcmcia_hw_init, 142 .hw_init = mst_pcmcia_hw_init,
142 .hw_shutdown = mst_pcmcia_hw_shutdown, 143 .hw_shutdown = mst_pcmcia_hw_shutdown,
@@ -153,13 +154,17 @@ static int __init mst_pcmcia_init(void)
153{ 154{
154 int ret; 155 int ret;
155 156
157 if (!machine_is_mainstone())
158 return -ENODEV;
159
156 mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 160 mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
157 if (!mst_pcmcia_device) 161 if (!mst_pcmcia_device)
158 return -ENOMEM; 162 return -ENOMEM;
159 163
160 mst_pcmcia_device->dev.platform_data = &mst_pcmcia_ops; 164 ret = platform_device_add_data(mst_pcmcia_device, &mst_pcmcia_ops,
161 165 sizeof(mst_pcmcia_ops));
162 ret = platform_device_add(mst_pcmcia_device); 166 if (ret == 0)
167 ret = platform_device_add(mst_pcmcia_device);
163 168
164 if (ret) 169 if (ret)
165 platform_device_put(mst_pcmcia_device); 170 platform_device_put(mst_pcmcia_device);
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index d5c33bd78d68..d71f93d45833 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -222,7 +222,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
222 sharpsl_pcmcia_init_reset(skt); 222 sharpsl_pcmcia_init_reset(skt);
223} 223}
224 224
225static struct pcmcia_low_level sharpsl_pcmcia_ops = { 225static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
226 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
227 .hw_init = sharpsl_pcmcia_hw_init, 227 .hw_init = sharpsl_pcmcia_hw_init,
228 .hw_shutdown = sharpsl_pcmcia_hw_shutdown, 228 .hw_shutdown = sharpsl_pcmcia_hw_shutdown,
@@ -261,10 +261,12 @@ static int __init sharpsl_pcmcia_init(void)
261 if (!sharpsl_pcmcia_device) 261 if (!sharpsl_pcmcia_device)
262 return -ENOMEM; 262 return -ENOMEM;
263 263
264 sharpsl_pcmcia_device->dev.platform_data = &sharpsl_pcmcia_ops; 264 ret = platform_device_add_data(sharpsl_pcmcia_device,
265 sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev; 265 &sharpsl_pcmcia_ops, sizeof(sharpsl_pcmcia_ops));
266 266 if (ret == 0) {
267 ret = platform_device_add(sharpsl_pcmcia_device); 267 sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev;
268 ret = platform_device_add(sharpsl_pcmcia_device);
269 }
268 270
269 if (ret) 271 if (ret)
270 platform_device_put(sharpsl_pcmcia_device); 272 platform_device_put(sharpsl_pcmcia_device);
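
All three pxa2xx board files above switch from pointing dev.platform_data at a static ops structure to platform_device_add_data(), which copies the structure into the device. That copy is what allows the ops tables to be marked __initdata and discarded after boot. A condensed sketch of the pattern, with an invented ops type standing in for pcmcia_low_level:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

struct demo_board_ops {                 /* stands in for struct pcmcia_low_level */
        int (*hw_init)(void);
};

static struct demo_board_ops demo_ops __initdata = {
        /* .hw_init = ..., filled in by the real board support code */
};

static int __init demo_board_init(void)
{
        struct platform_device *pdev;
        int ret;

        pdev = platform_device_alloc("pxa2xx-pcmcia", -1);
        if (!pdev)
                return -ENOMEM;

        /* copies demo_ops into the device, so the __initdata original
         * can be freed once init memory is released */
        ret = platform_device_add_data(pdev, &demo_ops, sizeof(demo_ops));
        if (ret == 0)
                ret = platform_device_add(pdev);
        if (ret)
                platform_device_put(pdev);
        return ret;
}
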
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index ce2226273aaa..c0e2afc79e3e 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -21,86 +21,6 @@
21#include "cs_internal.h" 21#include "cs_internal.h"
22 22
23 23
24#ifdef CONFIG_PCMCIA_IOCTL
25
26#ifdef CONFIG_PCMCIA_PROBE
27
28static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
29{
30 int irq;
31 u32 mask;
32
33 irq = adj->resource.irq.IRQ;
34 if ((irq < 0) || (irq > 15))
35 return CS_BAD_IRQ;
36
37 if (adj->Action != REMOVE_MANAGED_RESOURCE)
38 return 0;
39
40 mask = 1 << irq;
41
42 if (!(s->irq_mask & mask))
43 return 0;
44
45 s->irq_mask &= ~mask;
46
47 return 0;
48}
49
50#else
51
52static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) {
53 return CS_SUCCESS;
54}
55
56#endif
57
58
59int pcmcia_adjust_resource_info(adjust_t *adj)
60{
61 struct pcmcia_socket *s;
62 int ret = CS_UNSUPPORTED_FUNCTION;
63 unsigned long flags;
64
65 down_read(&pcmcia_socket_list_rwsem);
66 list_for_each_entry(s, &pcmcia_socket_list, socket_list) {
67
68 if (adj->Resource == RES_IRQ)
69 ret = adjust_irq(s, adj);
70
71 else if (s->resource_ops->adjust_resource) {
72
73 /* you can't use the old interface if the new
74 * one was used before */
75 spin_lock_irqsave(&s->lock, flags);
76 if ((s->resource_setup_new) &&
77 !(s->resource_setup_old)) {
78 spin_unlock_irqrestore(&s->lock, flags);
79 continue;
80 } else if (!(s->resource_setup_old))
81 s->resource_setup_old = 1;
82 spin_unlock_irqrestore(&s->lock, flags);
83
84 ret = s->resource_ops->adjust_resource(s, adj);
85 if (!ret) {
86 /* as there's no way we know this is the
87 * last call to adjust_resource_info, we
88 * always need to assume this is the latest
89 * one... */
90 spin_lock_irqsave(&s->lock, flags);
91 s->resource_setup_done = 1;
92 spin_unlock_irqrestore(&s->lock, flags);
93 }
94 }
95 }
96 up_read(&pcmcia_socket_list_rwsem);
97
98 return (ret);
99}
100EXPORT_SYMBOL(pcmcia_adjust_resource_info);
101
102#endif
103
104int pcmcia_validate_mem(struct pcmcia_socket *s) 24int pcmcia_validate_mem(struct pcmcia_socket *s)
105{ 25{
106 if (s->resource_ops->validate_mem) 26 if (s->resource_ops->validate_mem)
@@ -164,7 +84,8 @@ struct pccard_resource_ops pccard_static_ops = {
164 .adjust_io_region = NULL, 84 .adjust_io_region = NULL,
165 .find_io = NULL, 85 .find_io = NULL,
166 .find_mem = NULL, 86 .find_mem = NULL,
167 .adjust_resource = NULL, 87 .add_io = NULL,
88 .add_mem = NULL,
168 .init = static_init, 89 .init = static_init,
169 .exit = NULL, 90 .exit = NULL,
170}; 91};
@@ -264,7 +185,8 @@ struct pccard_resource_ops pccard_iodyn_ops = {
264 .adjust_io_region = iodyn_adjust_io_region, 185 .adjust_io_region = iodyn_adjust_io_region,
265 .find_io = iodyn_find_io_region, 186 .find_io = iodyn_find_io_region,
266 .find_mem = NULL, 187 .find_mem = NULL,
267 .adjust_resource = NULL, 188 .add_io = NULL,
189 .add_mem = NULL,
268 .init = static_init, 190 .init = static_init,
269 .exit = NULL, 191 .exit = NULL,
270}; 192};
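
With adjust_resource() gone from the ops table, callers probe for the typed add_io()/add_mem() hooks and simply skip the operation when a backend (such as the static and iodyn ops above) leaves them NULL. A standalone sketch of that function-pointer-table pattern, with invented names:

#include <stdio.h>

struct demo_resource_ops {
        int (*add_io)(unsigned long begin, unsigned long end);
        int (*add_mem)(unsigned long begin, unsigned long end);
};

static int demo_add_io(unsigned long begin, unsigned long end)
{
        printf("add io  %#lx-%#lx\n", begin, end);
        return 0;
}

/* a backend that only knows how to manage I/O ranges */
static const struct demo_resource_ops demo_ops = {
        .add_io  = demo_add_io,
        .add_mem = NULL,                /* callers must check before use */
};

int main(void)
{
        const struct demo_resource_ops *ops = &demo_ops;

        if (ops->add_io)
                ops->add_io(0x3e0, 0x3e1);
        if (ops->add_mem)
                ops->add_mem(0xd0000, 0xdffff);
        else
                puts("add_mem not provided, skipping");
        return 0;
}
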
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 0fcf763b9175..d0c1d63d1891 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -31,7 +31,6 @@
31#include <pcmcia/cs_types.h> 31#include <pcmcia/cs_types.h>
32#include <pcmcia/ss.h> 32#include <pcmcia/ss.h>
33#include <pcmcia/cs.h> 33#include <pcmcia/cs.h>
34#include <pcmcia/bulkmem.h>
35#include <pcmcia/cistpl.h> 34#include <pcmcia/cistpl.h>
36#include "cs_internal.h" 35#include "cs_internal.h"
37 36
@@ -261,21 +260,22 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
261======================================================================*/ 260======================================================================*/
262 261
263/* Validation function for cards with a valid CIS */ 262/* Validation function for cards with a valid CIS */
264static int readable(struct pcmcia_socket *s, struct resource *res, cisinfo_t *info) 263static int readable(struct pcmcia_socket *s, struct resource *res,
264 unsigned int *count)
265{ 265{
266 int ret = -1; 266 int ret = -1;
267 267
268 s->cis_mem.res = res; 268 s->cis_mem.res = res;
269 s->cis_virt = ioremap(res->start, s->map_size); 269 s->cis_virt = ioremap(res->start, s->map_size);
270 if (s->cis_virt) { 270 if (s->cis_virt) {
271 ret = pccard_validate_cis(s, BIND_FN_ALL, info); 271 ret = pccard_validate_cis(s, BIND_FN_ALL, count);
272 /* invalidate mapping and CIS cache */ 272 /* invalidate mapping and CIS cache */
273 iounmap(s->cis_virt); 273 iounmap(s->cis_virt);
274 s->cis_virt = NULL; 274 s->cis_virt = NULL;
275 destroy_cis_cache(s); 275 destroy_cis_cache(s);
276 } 276 }
277 s->cis_mem.res = NULL; 277 s->cis_mem.res = NULL;
278 if ((ret != 0) || (info->Chains == 0)) 278 if ((ret != 0) || (count == 0))
279 return 0; 279 return 0;
280 return 1; 280 return 1;
281} 281}
@@ -316,7 +316,7 @@ static int
316cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size) 316cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size)
317{ 317{
318 struct resource *res1, *res2; 318 struct resource *res1, *res2;
319 cisinfo_t info1, info2; 319 unsigned int info1, info2;
320 int ret = 0; 320 int ret = 0;
321 321
322 res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe"); 322 res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe");
@@ -330,7 +330,7 @@ cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size)
330 free_region(res2); 330 free_region(res2);
331 free_region(res1); 331 free_region(res1);
332 332
333 return (ret == 2) && (info1.Chains == info2.Chains); 333 return (ret == 2) && (info1 == info2);
334} 334}
335 335
336static int 336static int
@@ -766,21 +766,6 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
766} 766}
767 767
768 768
769static int nonstatic_adjust_resource_info(struct pcmcia_socket *s, adjust_t *adj)
770{
771 unsigned long end;
772
773 switch (adj->Resource) {
774 case RES_MEMORY_RANGE:
775 end = adj->resource.memory.Base + adj->resource.memory.Size - 1;
776 return adjust_memory(s, adj->Action, adj->resource.memory.Base, end);
777 case RES_IO_RANGE:
778 end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1;
779 return adjust_io(s, adj->Action, adj->resource.io.BasePort, end);
780 }
781 return CS_UNSUPPORTED_FUNCTION;
782}
783
784#ifdef CONFIG_PCI 769#ifdef CONFIG_PCI
785static int nonstatic_autoadd_resources(struct pcmcia_socket *s) 770static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
786{ 771{
@@ -889,7 +874,8 @@ struct pccard_resource_ops pccard_nonstatic_ops = {
889 .adjust_io_region = nonstatic_adjust_io_region, 874 .adjust_io_region = nonstatic_adjust_io_region,
890 .find_io = nonstatic_find_io_region, 875 .find_io = nonstatic_find_io_region,
891 .find_mem = nonstatic_find_mem_region, 876 .find_mem = nonstatic_find_mem_region,
892 .adjust_resource = nonstatic_adjust_resource_info, 877 .add_io = adjust_io,
878 .add_mem = adjust_memory,
893 .init = nonstatic_init, 879 .init = nonstatic_init,
894 .exit = nonstatic_release_resource_db, 880 .exit = nonstatic_release_resource_db,
895}; 881};
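
With .adjust_resource gone, managed I/O and memory ranges reach the per-socket resource database through the new add_io/add_mem callbacks, whose (socket, action, start, end) arguments follow the adjust_io()/adjust_memory() helpers they are wired to just above. A hedged sketch of a caller (the function name is illustrative, and ADD_MANAGED_RESOURCE / CS_UNSUPPORTED_FUNCTION are assumed to be the existing PCMCIA constants visible to such a caller; this is not code from the patch):

/* illustrative sketch only */
static int example_add_io_window(struct pcmcia_socket *s,
				 unsigned long base, unsigned long num)
{
	if (!s->resource_ops->add_io)
		return CS_UNSUPPORTED_FUNCTION;	/* e.g. pccard_static_ops */

	/* register one managed I/O range [base, base + num - 1] */
	return s->resource_ops->add_io(s, ADD_MANAGED_RESOURCE,
				       base, base + num - 1);
}
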
@@ -1008,41 +994,34 @@ static ssize_t store_mem_db(struct device *dev,
1008} 994}
1009static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db); 995static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db);
1010 996
1011static struct device_attribute *pccard_rsrc_attributes[] = { 997static struct attribute *pccard_rsrc_attributes[] = {
1012 &dev_attr_available_resources_io, 998 &dev_attr_available_resources_io.attr,
1013 &dev_attr_available_resources_mem, 999 &dev_attr_available_resources_mem.attr,
1014 NULL, 1000 NULL,
1015}; 1001};
1016 1002
1003static const struct attribute_group rsrc_attributes = {
1004 .attrs = pccard_rsrc_attributes,
1005};
1006
1017static int __devinit pccard_sysfs_add_rsrc(struct device *dev, 1007static int __devinit pccard_sysfs_add_rsrc(struct device *dev,
1018 struct class_interface *class_intf) 1008 struct class_interface *class_intf)
1019{ 1009{
1020 struct pcmcia_socket *s = dev_get_drvdata(dev); 1010 struct pcmcia_socket *s = dev_get_drvdata(dev);
1021 struct device_attribute **attr; 1011
1022 int ret = 0;
1023 if (s->resource_ops != &pccard_nonstatic_ops) 1012 if (s->resource_ops != &pccard_nonstatic_ops)
1024 return 0; 1013 return 0;
1025 1014 return sysfs_create_group(&dev->kobj, &rsrc_attributes);
1026 for (attr = pccard_rsrc_attributes; *attr; attr++) {
1027 ret = device_create_file(dev, *attr);
1028 if (ret)
1029 break;
1030 }
1031
1032 return ret;
1033} 1015}
1034 1016
1035static void __devexit pccard_sysfs_remove_rsrc(struct device *dev, 1017static void __devexit pccard_sysfs_remove_rsrc(struct device *dev,
1036 struct class_interface *class_intf) 1018 struct class_interface *class_intf)
1037{ 1019{
1038 struct pcmcia_socket *s = dev_get_drvdata(dev); 1020 struct pcmcia_socket *s = dev_get_drvdata(dev);
1039 struct device_attribute **attr;
1040 1021
1041 if (s->resource_ops != &pccard_nonstatic_ops) 1022 if (s->resource_ops != &pccard_nonstatic_ops)
1042 return; 1023 return;
1043 1024 sysfs_remove_group(&dev->kobj, &rsrc_attributes);
1044 for (attr = pccard_rsrc_attributes; *attr; attr++)
1045 device_remove_file(dev, *attr);
1046} 1025}
1047 1026
1048static struct class_interface pccard_rsrc_interface __refdata = { 1027static struct class_interface pccard_rsrc_interface __refdata = {
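
For reference, the hunk above is the standard sysfs attribute-group conversion: collect the ->attr members of the device attributes in a NULL-terminated array, wrap them in an attribute_group, and create or remove the whole set with a single call. A minimal, self-contained sketch of the pattern (illustrative names, not code from this tree):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static int example_add(struct device *dev)
{
	/* replaces the old device_create_file() loop */
	return sysfs_create_group(&dev->kobj, &example_group);
}

static void example_remove(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &example_group);
}
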
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 1edc1da9d353..91ef6a0da3ab 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -14,7 +14,6 @@
14#include <pcmcia/cs_types.h> 14#include <pcmcia/cs_types.h>
15#include <pcmcia/cs.h> 15#include <pcmcia/cs.h>
16#include <pcmcia/ss.h> 16#include <pcmcia/ss.h>
17#include <pcmcia/bulkmem.h>
18#include <pcmcia/cistpl.h> 17#include <pcmcia/cistpl.h>
19#include "cs_internal.h" 18#include "cs_internal.h"
20 19
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 562384d6f321..006a29e91d83 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -27,11 +27,9 @@
27#include <asm/system.h> 27#include <asm/system.h>
28#include <asm/irq.h> 28#include <asm/irq.h>
29 29
30#define IN_CARD_SERVICES
31#include <pcmcia/cs_types.h> 30#include <pcmcia/cs_types.h>
32#include <pcmcia/ss.h> 31#include <pcmcia/ss.h>
33#include <pcmcia/cs.h> 32#include <pcmcia/cs.h>
34#include <pcmcia/bulkmem.h>
35#include <pcmcia/cistpl.h> 33#include <pcmcia/cistpl.h>
36#include <pcmcia/cisreg.h> 34#include <pcmcia/cisreg.h>
37#include <pcmcia/ds.h> 35#include <pcmcia/ds.h>
@@ -293,7 +291,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
293 count = 0; 291 count = 0;
294 else { 292 else {
295 struct pcmcia_socket *s; 293 struct pcmcia_socket *s;
296 cisinfo_t cisinfo; 294 unsigned int chains;
297 295
298 if (off + count > size) 296 if (off + count > size)
299 count = size - off; 297 count = size - off;
@@ -302,9 +300,9 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
302 300
303 if (!(s->state & SOCKET_PRESENT)) 301 if (!(s->state & SOCKET_PRESENT))
304 return -ENODEV; 302 return -ENODEV;
305 if (pccard_validate_cis(s, BIND_FN_ALL, &cisinfo)) 303 if (pccard_validate_cis(s, BIND_FN_ALL, &chains))
306 return -EIO; 304 return -EIO;
307 if (!cisinfo.Chains) 305 if (!chains)
308 return -ENODATA; 306 return -ENODATA;
309 307
310 count = pccard_extract_cis(s, buf, off, count); 308 count = pccard_extract_cis(s, buf, off, count);
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index d29657bf1b40..129db7bd06c3 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -155,7 +155,7 @@
155#define ENE_TEST_C9_TLTENABLE 0x02 155#define ENE_TEST_C9_TLTENABLE 0x02
156#define ENE_TEST_C9_PFENABLE_F0 0x04 156#define ENE_TEST_C9_PFENABLE_F0 0x04
157#define ENE_TEST_C9_PFENABLE_F1 0x08 157#define ENE_TEST_C9_PFENABLE_F1 0x08
158#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F0) 158#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F1)
159#define ENE_TEST_C9_WPDISALBLE_F0 0x40 159#define ENE_TEST_C9_WPDISALBLE_F0 0x40
160#define ENE_TEST_C9_WPDISALBLE_F1 0x80 160#define ENE_TEST_C9_WPDISALBLE_F1 0x80
161#define ENE_TEST_C9_WPDISALBLE (ENE_TEST_C9_WPDISALBLE_F0 | ENE_TEST_C9_WPDISALBLE_F1) 161#define ENE_TEST_C9_WPDISALBLE (ENE_TEST_C9_WPDISALBLE_F0 | ENE_TEST_C9_WPDISALBLE_F1)
@@ -692,7 +692,7 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
692 goto out; 692 goto out;
693 693
694 /* check state */ 694 /* check state */
695 yenta_get_status(&socket->socket, &state); 695 yenta_get_status(&slot2->socket, &state);
696 if (state & SS_DETECT) { 696 if (state & SS_DETECT) {
697 ret = 0; 697 ret = 0;
698 goto out; 698 goto out;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4949dc4859be..fc85bf2e4a97 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -469,6 +469,16 @@ config RTC_DRV_VR41XX
469 To compile this driver as a module, choose M here: the 469 To compile this driver as a module, choose M here: the
470 module will be called rtc-vr41xx. 470 module will be called rtc-vr41xx.
471 471
472config RTC_DRV_PL030
473 tristate "ARM AMBA PL030 RTC"
474 depends on ARM_AMBA
475 help
476 If you say Y here you will get access to ARM AMBA
477 PrimeCell PL030 RTC found on certain ARM SOCs.
478
479 To compile this driver as a module, choose M here: the
480 module will be called rtc-pl030.
481
472config RTC_DRV_PL031 482config RTC_DRV_PL031
473 tristate "ARM AMBA PL031 RTC" 483 tristate "ARM AMBA PL031 RTC"
474 depends on ARM_AMBA 484 depends on ARM_AMBA
@@ -495,12 +505,13 @@ config RTC_DRV_AT91RM9200
495 this is powered by the backup power supply. 505 this is powered by the backup power supply.
496 506
497config RTC_DRV_AT91SAM9 507config RTC_DRV_AT91SAM9
498 tristate "AT91SAM9x" 508 tristate "AT91SAM9x/AT91CAP9"
499 depends on ARCH_AT91 && !(ARCH_AT91RM9200 || ARCH_AT91X40) 509 depends on ARCH_AT91 && !(ARCH_AT91RM9200 || ARCH_AT91X40)
500 help 510 help
501 RTC driver for the Atmel AT91SAM9x internal RTT (Real Time Timer). 511 RTC driver for the Atmel AT91SAM9x and AT91CAP9 internal RTT
502 These timers are powered by the backup power supply (such as a 512 (Real Time Timer). These timers are powered by the backup power
503 small coin cell battery), but do not need to be used as RTCs. 513 supply (such as a small coin cell battery), but do not need to
514 be used as RTCs.
504 515
505 (On AT91SAM9rl chips you probably want to use the dedicated RTC 516 (On AT91SAM9rl chips you probably want to use the dedicated RTC
506 module and leave the RTT available for other uses.) 517 module and leave the RTT available for other uses.)
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index b6e14d51670b..b5d9d67df887 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
41obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o 41obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
42obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o 42obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
43obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o 43obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
44obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
44obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o 45obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
45obj-$(CONFIG_RTC_DRV_PPC) += rtc-ppc.o 46obj-$(CONFIG_RTC_DRV_PPC) += rtc-ppc.o
46obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o 47obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index 2ef8cdfda4a7..90b9a6503e15 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -265,6 +265,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
265 } 265 }
266 266
267 platform_set_drvdata(pdev, rtc); 267 platform_set_drvdata(pdev, rtc);
268 device_init_wakeup(&pdev->dev, 1);
268 269
269 dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n", 270 dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
270 (unsigned long)rtc->regs, rtc->irq); 271 (unsigned long)rtc->regs, rtc->irq);
@@ -284,6 +285,8 @@ static int __exit at32_rtc_remove(struct platform_device *pdev)
284{ 285{
285 struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev); 286 struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev);
286 287
288 device_init_wakeup(&pdev->dev, 0);
289
287 free_irq(rtc->irq, rtc); 290 free_irq(rtc->irq, rtc);
288 iounmap(rtc->regs); 291 iounmap(rtc->regs);
289 rtc_device_unregister(rtc->rtc); 292 rtc_device_unregister(rtc->rtc);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 39e64ab1ecb7..9c3db934cc24 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -29,10 +29,6 @@
29#include <linux/completion.h> 29#include <linux/completion.h>
30 30
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <asm/rtc.h>
33
34#include <asm/mach/time.h>
35
36#include <asm/arch/at91_rtc.h> 32#include <asm/arch/at91_rtc.h>
37 33
38 34
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 38d8742a4bdf..f0246ef413a4 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -19,7 +19,6 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/ioctl.h> 20#include <linux/ioctl.h>
21 21
22#include <asm/mach/time.h>
23#include <asm/arch/board.h> 22#include <asm/arch/board.h>
24#include <asm/arch/at91_rtt.h> 23#include <asm/arch/at91_rtt.h>
25 24
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 90dfa0df747a..0114a78b7cbb 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/rtc.h> 15#include <linux/rtc.h>
16#include <linux/smp_lock.h>
16#include "rtc-core.h" 17#include "rtc-core.h"
17 18
18static dev_t rtc_devt; 19static dev_t rtc_devt;
@@ -26,8 +27,11 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
26 struct rtc_device, char_dev); 27 struct rtc_device, char_dev);
27 const struct rtc_class_ops *ops = rtc->ops; 28 const struct rtc_class_ops *ops = rtc->ops;
28 29
29 if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) 30 lock_kernel();
30 return -EBUSY; 31 if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) {
32 err = -EBUSY;
33 goto out;
34 }
31 35
32 file->private_data = rtc; 36 file->private_data = rtc;
33 37
@@ -37,11 +41,13 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
37 rtc->irq_data = 0; 41 rtc->irq_data = 0;
38 spin_unlock_irq(&rtc->irq_lock); 42 spin_unlock_irq(&rtc->irq_lock);
39 43
40 return 0; 44 goto out;
41 } 45 }
42 46
43 /* something has gone wrong */ 47 /* something has gone wrong */
44 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); 48 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
49out:
50 unlock_kernel();
45 return err; 51 return err;
46} 52}
47 53
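
This hunk is part of the BKL pushdown: open() now takes the big kernel lock itself instead of relying on the VFS to hold it across ->open(). The same shape recurs in rtc-m41t80.c, dasd_eer.c and fs3270.c further down. A minimal sketch of the pattern with an illustrative single-open flag (not code from this tree):

#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/smp_lock.h>

static unsigned long example_busy;

static int example_open(struct inode *inode, struct file *file)
{
	int err = 0;

	lock_kernel();		/* previously taken by the VFS before ->open() */
	if (test_and_set_bit(0, &example_busy))
		err = -EBUSY;	/* device is single-open */
	unlock_kernel();
	return err;
}
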
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index a3e0880b38fb..0a19c06019be 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/smp_lock.h>
20#include <linux/string.h> 21#include <linux/string.h>
21#include <linux/i2c.h> 22#include <linux/i2c.h>
22#include <linux/rtc.h> 23#include <linux/rtc.h>
@@ -655,12 +656,16 @@ static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
655static int wdt_open(struct inode *inode, struct file *file) 656static int wdt_open(struct inode *inode, struct file *file)
656{ 657{
657 if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) { 658 if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
658 if (test_and_set_bit(0, &wdt_is_open)) 659 lock_kernel();
660 if (test_and_set_bit(0, &wdt_is_open)) {
661 unlock_kernel();
659 return -EBUSY; 662 return -EBUSY;
663 }
660 /* 664 /*
661 * Activate 665 * Activate
662 */ 666 */
663 wdt_is_open = 1; 667 wdt_is_open = 1;
668 unlock_kernel();
664 return 0; 669 return 0;
665 } 670 }
666 return -ENODEV; 671 return -ENODEV;
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 58f81c774943..eb23d8423f42 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -22,7 +22,6 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23 23
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/mach/time.h>
26 25
27 26
28/* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock 27/* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
new file mode 100644
index 000000000000..8448eeb9d675
--- /dev/null
+++ b/drivers/rtc/rtc-pl030.c
@@ -0,0 +1,217 @@
1/*
2 * linux/drivers/rtc/rtc-pl030.c
3 *
4 * Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/module.h>
11#include <linux/rtc.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/amba/bus.h>
15#include <linux/io.h>
16
17#define RTC_DR (0)
18#define RTC_MR (4)
19#define RTC_STAT (8)
20#define RTC_EOI (8)
21#define RTC_LR (12)
22#define RTC_CR (16)
23#define RTC_CR_MIE (1 << 0)
24
25struct pl030_rtc {
26 struct rtc_device *rtc;
27 void __iomem *base;
28};
29
30static irqreturn_t pl030_interrupt(int irq, void *dev_id)
31{
32 struct pl030_rtc *rtc = dev_id;
33 writel(0, rtc->base + RTC_EOI);
34 return IRQ_HANDLED;
35}
36
37static int pl030_open(struct device *dev)
38{
39 return 0;
40}
41
42static void pl030_release(struct device *dev)
43{
44}
45
46static int pl030_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
47{
48 return -ENOIOCTLCMD;
49}
50
51static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
52{
53 struct pl030_rtc *rtc = dev_get_drvdata(dev);
54
55 rtc_time_to_tm(readl(rtc->base + RTC_MR), &alrm->time);
56 return 0;
57}
58
59static int pl030_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
60{
61 struct pl030_rtc *rtc = dev_get_drvdata(dev);
62 unsigned long time;
63 int ret;
64
65 /*
66 * At the moment, we can only deal with non-wildcarded alarm times.
67 */
68 ret = rtc_valid_tm(&alrm->time);
69 if (ret == 0)
70 ret = rtc_tm_to_time(&alrm->time, &time);
71 if (ret == 0)
72 writel(time, rtc->base + RTC_MR);
73 return ret;
74}
75
76static int pl030_read_time(struct device *dev, struct rtc_time *tm)
77{
78 struct pl030_rtc *rtc = dev_get_drvdata(dev);
79
80 rtc_time_to_tm(readl(rtc->base + RTC_DR), tm);
81
82 return 0;
83}
84
85/*
86 * Set the RTC time. Unfortunately, we can't accurately set
87 * the point at which the counter updates.
88 *
89 * Also, since RTC_LR is transferred to RTC_CR on next rising
90 * edge of the 1Hz clock, we must write the time one second
91 * in advance.
92 */
93static int pl030_set_time(struct device *dev, struct rtc_time *tm)
94{
95 struct pl030_rtc *rtc = dev_get_drvdata(dev);
96 unsigned long time;
97 int ret;
98
99 ret = rtc_tm_to_time(tm, &time);
100 if (ret == 0)
101 writel(time + 1, rtc->base + RTC_LR);
102
103 return ret;
104}
105
106static const struct rtc_class_ops pl030_ops = {
107 .open = pl030_open,
108 .release = pl030_release,
109 .ioctl = pl030_ioctl,
110 .read_time = pl030_read_time,
111 .set_time = pl030_set_time,
112 .read_alarm = pl030_read_alarm,
113 .set_alarm = pl030_set_alarm,
114};
115
116static int pl030_probe(struct amba_device *dev, void *id)
117{
118 struct pl030_rtc *rtc;
119 int ret;
120
121 ret = amba_request_regions(dev, NULL);
122 if (ret)
123 goto err_req;
124
125 rtc = kmalloc(sizeof(*rtc), GFP_KERNEL);
126 if (!rtc) {
127 ret = -ENOMEM;
128 goto err_rtc;
129 }
130
131 rtc->base = ioremap(dev->res.start, SZ_4K);
132 if (!rtc->base) {
133 ret = -ENOMEM;
134 goto err_map;
135 }
136
137 __raw_writel(0, rtc->base + RTC_CR);
138 __raw_writel(0, rtc->base + RTC_EOI);
139
140 amba_set_drvdata(dev, rtc);
141
142 ret = request_irq(dev->irq[0], pl030_interrupt, IRQF_DISABLED,
143 "rtc-pl030", rtc);
144 if (ret)
145 goto err_irq;
146
147 rtc->rtc = rtc_device_register("pl030", &dev->dev, &pl030_ops,
148 THIS_MODULE);
149 if (IS_ERR(rtc->rtc)) {
150 ret = PTR_ERR(rtc->rtc);
151 goto err_reg;
152 }
153
154 return 0;
155
156 err_reg:
157 free_irq(dev->irq[0], rtc);
158 err_irq:
159 iounmap(rtc->base);
160 err_map:
161 kfree(rtc);
162 err_rtc:
163 amba_release_regions(dev);
164 err_req:
165 return ret;
166}
167
168static int pl030_remove(struct amba_device *dev)
169{
170 struct pl030_rtc *rtc = amba_get_drvdata(dev);
171
172 amba_set_drvdata(dev, NULL);
173
174 writel(0, rtc->base + RTC_CR);
175
176 free_irq(dev->irq[0], rtc);
177 rtc_device_unregister(rtc->rtc);
178 iounmap(rtc->base);
179 kfree(rtc);
180 amba_release_regions(dev);
181
182 return 0;
183}
184
185static struct amba_id pl030_ids[] = {
186 {
187 .id = 0x00041030,
188 .mask = 0x000fffff,
189 },
190 { 0, 0 },
191};
192
193static struct amba_driver pl030_driver = {
194 .drv = {
195 .name = "rtc-pl030",
196 },
197 .probe = pl030_probe,
198 .remove = pl030_remove,
199 .id_table = pl030_ids,
200};
201
202static int __init pl030_init(void)
203{
204 return amba_driver_register(&pl030_driver);
205}
206
207static void __exit pl030_exit(void)
208{
209 amba_driver_unregister(&pl030_driver);
210}
211
212module_init(pl030_init);
213module_exit(pl030_exit);
214
215MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
216MODULE_DESCRIPTION("ARM AMBA PL030 RTC Driver");
217MODULE_LICENSE("GPL");
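
Once a driver like the PL030 above registers with the RTC class, it can be exercised entirely from user space through the usual character device. A small smoke test, assuming the device comes up as /dev/rtc0 (RTC_RD_TIME is the standard ioctl from <linux/rtc.h>; this program is not part of the driver):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		perror("RTC_RD_TIME");
		close(fd);
		return 1;
	}
	printf("rtc time: %04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}
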
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 2fd49edcc712..08b4610ec5a6 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -12,23 +12,12 @@
12 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version. 13 * 2 of the License, or (at your option) any later version.
14 */ 14 */
15
16#include <linux/platform_device.h>
17#include <linux/module.h> 15#include <linux/module.h>
18#include <linux/rtc.h> 16#include <linux/rtc.h>
19#include <linux/init.h> 17#include <linux/init.h>
20#include <linux/fs.h>
21#include <linux/interrupt.h> 18#include <linux/interrupt.h>
22#include <linux/string.h>
23#include <linux/pm.h>
24#include <linux/bitops.h>
25
26#include <linux/amba/bus.h> 19#include <linux/amba/bus.h>
27 20#include <linux/io.h>
28#include <asm/io.h>
29#include <asm/hardware.h>
30#include <asm/irq.h>
31#include <asm/rtc.h>
32 21
33/* 22/*
34 * Register definitions 23 * Register definitions
@@ -142,13 +131,12 @@ static int pl031_remove(struct amba_device *adev)
142{ 131{
143 struct pl031_local *ldata = dev_get_drvdata(&adev->dev); 132 struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
144 133
145 if (ldata) { 134 amba_set_drvdata(adev, NULL);
146 dev_set_drvdata(&adev->dev, NULL); 135 free_irq(adev->irq[0], ldata->rtc);
147 free_irq(adev->irq[0], ldata->rtc); 136 rtc_device_unregister(ldata->rtc);
148 rtc_device_unregister(ldata->rtc); 137 iounmap(ldata->base);
149 iounmap(ldata->base); 138 kfree(ldata);
150 kfree(ldata); 139 amba_release_regions(adev);
151 }
152 140
153 return 0; 141 return 0;
154} 142}
@@ -158,13 +146,15 @@ static int pl031_probe(struct amba_device *adev, void *id)
158 int ret; 146 int ret;
159 struct pl031_local *ldata; 147 struct pl031_local *ldata;
160 148
149 ret = amba_request_regions(adev, NULL);
150 if (ret)
151 goto err_req;
161 152
162 ldata = kmalloc(sizeof(struct pl031_local), GFP_KERNEL); 153 ldata = kmalloc(sizeof(struct pl031_local), GFP_KERNEL);
163 if (!ldata) { 154 if (!ldata) {
164 ret = -ENOMEM; 155 ret = -ENOMEM;
165 goto out; 156 goto out;
166 } 157 }
167 dev_set_drvdata(&adev->dev, ldata);
168 158
169 ldata->base = ioremap(adev->res.start, 159 ldata->base = ioremap(adev->res.start,
170 adev->res.end - adev->res.start + 1); 160 adev->res.end - adev->res.start + 1);
@@ -173,6 +163,8 @@ static int pl031_probe(struct amba_device *adev, void *id)
173 goto out_no_remap; 163 goto out_no_remap;
174 } 164 }
175 165
166 amba_set_drvdata(adev, ldata);
167
176 if (request_irq(adev->irq[0], pl031_interrupt, IRQF_DISABLED, 168 if (request_irq(adev->irq[0], pl031_interrupt, IRQF_DISABLED,
177 "rtc-pl031", ldata->rtc)) { 169 "rtc-pl031", ldata->rtc)) {
178 ret = -EIO; 170 ret = -EIO;
@@ -192,10 +184,12 @@ out_no_rtc:
192 free_irq(adev->irq[0], ldata->rtc); 184 free_irq(adev->irq[0], ldata->rtc);
193out_no_irq: 185out_no_irq:
194 iounmap(ldata->base); 186 iounmap(ldata->base);
187 amba_set_drvdata(adev, NULL);
195out_no_remap: 188out_no_remap:
196 dev_set_drvdata(&adev->dev, NULL);
197 kfree(ldata); 189 kfree(ldata);
198out: 190out:
191 amba_release_regions(adev);
192err_req:
199 return ret; 193 return ret;
200} 194}
201 195
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index f26e0cad8f16..fed86e507fdf 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -26,10 +26,6 @@
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/irq.h> 28#include <asm/irq.h>
29#include <asm/rtc.h>
30
31#include <asm/mach/time.h>
32
33#include <asm/plat-s3c/regs-rtc.h> 29#include <asm/plat-s3c/regs-rtc.h>
34 30
35/* I have yet to find an S3C implementation with more than one 31/* I have yet to find an S3C implementation with more than one
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 67421b0d3a7b..f47294c60148 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -33,7 +33,6 @@
33 33
34#include <asm/hardware.h> 34#include <asm/hardware.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <asm/rtc.h>
37 36
38#ifdef CONFIG_ARCH_PXA 37#ifdef CONFIG_ARCH_PXA
39#include <asm/arch/pxa-regs.h> 38#include <asm/arch/pxa-regs.h>
@@ -47,6 +46,42 @@ static unsigned long rtc_freq = 1024;
47static struct rtc_time rtc_alarm; 46static struct rtc_time rtc_alarm;
48static DEFINE_SPINLOCK(sa1100_rtc_lock); 47static DEFINE_SPINLOCK(sa1100_rtc_lock);
49 48
49static inline int rtc_periodic_alarm(struct rtc_time *tm)
50{
51 return (tm->tm_year == -1) ||
52 ((unsigned)tm->tm_mon >= 12) ||
53 ((unsigned)(tm->tm_mday - 1) >= 31) ||
54 ((unsigned)tm->tm_hour > 23) ||
55 ((unsigned)tm->tm_min > 59) ||
56 ((unsigned)tm->tm_sec > 59);
57}
58
59/*
60 * Calculate the next alarm time given the requested alarm time mask
61 * and the current time.
62 */
63static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm)
64{
65 unsigned long next_time;
66 unsigned long now_time;
67
68 next->tm_year = now->tm_year;
69 next->tm_mon = now->tm_mon;
70 next->tm_mday = now->tm_mday;
71 next->tm_hour = alrm->tm_hour;
72 next->tm_min = alrm->tm_min;
73 next->tm_sec = alrm->tm_sec;
74
75 rtc_tm_to_time(now, &now_time);
76 rtc_tm_to_time(next, &next_time);
77
78 if (next_time < now_time) {
79 /* Advance one day */
80 next_time += 60 * 60 * 24;
81 rtc_time_to_tm(next_time, next);
82 }
83}
84
50static int rtc_update_alarm(struct rtc_time *alrm) 85static int rtc_update_alarm(struct rtc_time *alrm)
51{ 86{
52 struct rtc_time alarm_tm, now_tm; 87 struct rtc_time alarm_tm, now_tm;
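
The helpers added above implement the usual time-of-day alarm rule: wildcarded date fields mean the alarm repeats daily, and if the requested time has already passed today it fires tomorrow. The same decision can be illustrated with a stand-alone C program that uses the C library instead of the kernel rtc helpers (the 06:30:00 alarm time is an assumed example):

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm next = *localtime(&now);

	/* requested alarm time-of-day; date fields start out as "today" */
	next.tm_hour = 6;
	next.tm_min = 30;
	next.tm_sec = 0;

	if (mktime(&next) < now) {
		next.tm_mday += 1;	/* already passed today: advance one day */
		mktime(&next);		/* normalize across month/year boundaries */
	}

	printf("next alarm: %s", asctime(&next));
	return 0;
}
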
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1a4025683362..1b6c52ef7339 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
995 now = get_clock(); 995 now = get_clock();
996 996
997 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", 997 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
998 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), 998 cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
999 (unsigned int) intparm); 999 irb->scsw.cmd.dstat), (unsigned int) intparm);
1000 1000
1001 /* check for unsolicited interrupts */ 1001 /* check for unsolicited interrupts */
1002 cqr = (struct dasd_ccw_req *) intparm; 1002 cqr = (struct dasd_ccw_req *) intparm;
1003 if (!cqr || ((irb->scsw.cc == 1) && 1003 if (!cqr || ((irb->scsw.cmd.cc == 1) &&
1004 (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && 1004 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
1005 (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) { 1005 (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
1006 if (cqr && cqr->status == DASD_CQR_IN_IO) 1006 if (cqr && cqr->status == DASD_CQR_IN_IO)
1007 cqr->status = DASD_CQR_QUEUED; 1007 cqr->status = DASD_CQR_QUEUED;
1008 device = dasd_device_from_cdev_locked(cdev); 1008 device = dasd_device_from_cdev_locked(cdev);
@@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1025 1025
1026 /* Check for clear pending */ 1026 /* Check for clear pending */
1027 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1027 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1028 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1028 irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
1029 cqr->status = DASD_CQR_CLEARED; 1029 cqr->status = DASD_CQR_CLEARED;
1030 dasd_device_clear_timer(device); 1030 dasd_device_clear_timer(device);
1031 wake_up(&dasd_flush_wq); 1031 wake_up(&dasd_flush_wq);
@@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1041 return; 1041 return;
1042 } 1042 }
1043 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", 1043 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
1044 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); 1044 ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
1045 next = NULL; 1045 next = NULL;
1046 expires = 0; 1046 expires = 0;
1047 if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1047 if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1048 irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) { 1048 irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
1049 /* request was completed successfully */ 1049 /* request was completed successfully */
1050 cqr->status = DASD_CQR_SUCCESS; 1050 cqr->status = DASD_CQR_SUCCESS;
1051 cqr->stopclk = now; 1051 cqr->stopclk = now;
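
The s390 hunks from here on are one mechanical rename: the subchannel status word in struct irb gained command-mode and transport-mode variants, so its fields are now reached through scsw.cmd.* instead of scsw.*. The success check used above, pulled out as a sketch (assumes <asm/cio.h> for struct irb and the DEV_STAT_* masks; this helper itself does not exist in the tree):

static inline int example_io_completed_ok(struct irb *irb)
{
	/* device end + channel end, no subchannel status bits set */
	return irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	       irb->scsw.cmd.cstat == 0;
}
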
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e6700df52df4..5c6e6f331cb0 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1572 1572
1573 /* determine the address of the CCW to be restarted */ 1573 /* determine the address of the CCW to be restarted */
1574 /* Imprecise ending is not set -> addr from IRB-SCSW */ 1574 /* Imprecise ending is not set -> addr from IRB-SCSW */
1575 cpa = default_erp->refers->irb.scsw.cpa; 1575 cpa = default_erp->refers->irb.scsw.cmd.cpa;
1576 1576
1577 if (cpa == 0) { 1577 if (cpa == 0) {
1578 1578
@@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1725 1725
1726 /* determine the address of the CCW to be restarted */ 1726 /* determine the address of the CCW to be restarted */
1727 /* Imprecise ending is not set -> addr from IRB-SCSW */ 1727 /* Imprecise ending is not set -> addr from IRB-SCSW */
1728 cpa = previous_erp->irb.scsw.cpa; 1728 cpa = previous_erp->irb.scsw.cmd.cpa;
1729 1729
1730 if (cpa == 0) { 1730 if (cpa == 0) {
1731 1731
@@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
2171{ 2171{
2172 struct dasd_device *device = erp->startdev; 2172 struct dasd_device *device = erp->startdev;
2173 2173
2174 if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK 2174 if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK
2175 | SCHN_STAT_CHN_CTRL_CHK)) { 2175 | SCHN_STAT_CHN_CTRL_CHK)) {
2176 DEV_MESSAGE(KERN_DEBUG, device, "%s", 2176 DEV_MESSAGE(KERN_DEBUG, device, "%s",
2177 "channel or interface control check"); 2177 "channel or interface control check");
@@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
2352 2352
2353 if ((cqr1->irb.esw.esw0.erw.cons == 0) && 2353 if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
2354 (cqr2->irb.esw.esw0.erw.cons == 0)) { 2354 (cqr2->irb.esw.esw0.erw.cons == 0)) {
2355 if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2355 if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
2356 SCHN_STAT_CHN_CTRL_CHK)) == 2356 SCHN_STAT_CHN_CTRL_CHK)) ==
2357 (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2357 (cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
2358 SCHN_STAT_CHN_CTRL_CHK))) 2358 SCHN_STAT_CHN_CTRL_CHK)))
2359 return 1; /* match with ifcc*/ 2359 return 1; /* match with ifcc*/
2360 } 2360 }
@@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2622 } 2622 }
2623 2623
2624 /* double-check if current erp/cqr was successfull */ 2624 /* double-check if current erp/cqr was successfull */
2625 if ((cqr->irb.scsw.cstat == 0x00) && 2625 if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
2626 (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { 2626 (cqr->irb.scsw.cmd.dstat ==
2627 (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
2627 2628
2628 DEV_MESSAGE(KERN_DEBUG, device, 2629 DEV_MESSAGE(KERN_DEBUG, device,
2629 "ERP called for successful request %p" 2630 "ERP called for successful request %p"
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a0edae091b5e..e0b77210d37a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1404,13 +1404,14 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1404 1404
1405 /* first of all check for state change pending interrupt */ 1405 /* first of all check for state change pending interrupt */
1406 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 1406 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1407 if ((irb->scsw.dstat & mask) == mask) { 1407 if ((irb->scsw.cmd.dstat & mask) == mask) {
1408 dasd_generic_handle_state_change(device); 1408 dasd_generic_handle_state_change(device);
1409 return; 1409 return;
1410 } 1410 }
1411 1411
1412 /* summary unit check */ 1412 /* summary unit check */
1413 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) { 1413 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
1414 (irb->ecw[7] == 0x0D)) {
1414 dasd_alias_handle_summary_unit_check(device, irb); 1415 dasd_alias_handle_summary_unit_check(device, irb);
1415 return; 1416 return;
1416 } 1417 }
@@ -2068,11 +2069,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
2068 device->cdev->dev.bus_id); 2069 device->cdev->dev.bus_id);
2069 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2070 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2070 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 2071 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
2071 irb->scsw.cstat, irb->scsw.dstat); 2072 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
2072 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2073 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2073 " device %s: Failing CCW: %p\n", 2074 " device %s: Failing CCW: %p\n",
2074 device->cdev->dev.bus_id, 2075 device->cdev->dev.bus_id,
2075 (void *) (addr_t) irb->scsw.cpa); 2076 (void *) (addr_t) irb->scsw.cmd.cpa);
2076 if (irb->esw.esw0.erw.cons) { 2077 if (irb->esw.esw0.erw.cons) {
2077 for (sl = 0; sl < 4; sl++) { 2078 for (sl = 0; sl < 4; sl++) {
2078 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2079 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -2122,7 +2123,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
2122 /* scsw->cda is either valid or zero */ 2123 /* scsw->cda is either valid or zero */
2123 len = 0; 2124 len = 0;
2124 from = ++to; 2125 from = ++to;
2125 fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ 2126 fail = (struct ccw1 *)(addr_t)
2127 irb->scsw.cmd.cpa; /* failing CCW */
2126 if (from < fail - 2) { 2128 if (from < fail - 2) {
2127 from = fail - 2; /* there is a gap - print header */ 2129 from = fail - 2; /* there is a gap - print header */
2128 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); 2130 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 6e53ab606e97..29da4413ad43 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -15,6 +15,7 @@
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/poll.h> 16#include <linux/poll.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/smp_lock.h>
18 19
19#include <asm/uaccess.h> 20#include <asm/uaccess.h>
20#include <asm/atomic.h> 21#include <asm/atomic.h>
@@ -525,6 +526,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
525 eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); 526 eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
526 if (!eerb) 527 if (!eerb)
527 return -ENOMEM; 528 return -ENOMEM;
529 lock_kernel();
528 eerb->buffer_page_count = eer_pages; 530 eerb->buffer_page_count = eer_pages;
529 if (eerb->buffer_page_count < 1 || 531 if (eerb->buffer_page_count < 1 ||
530 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { 532 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
@@ -532,6 +534,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
532 MESSAGE(KERN_WARNING, "can't open device since module " 534 MESSAGE(KERN_WARNING, "can't open device since module "
533 "parameter eer_pages is smaller then 1 or" 535 "parameter eer_pages is smaller then 1 or"
534 " bigger then %d", (int)(INT_MAX / PAGE_SIZE)); 536 " bigger then %d", (int)(INT_MAX / PAGE_SIZE));
537 unlock_kernel();
535 return -EINVAL; 538 return -EINVAL;
536 } 539 }
537 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; 540 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
@@ -539,12 +542,14 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
539 GFP_KERNEL); 542 GFP_KERNEL);
540 if (!eerb->buffer) { 543 if (!eerb->buffer) {
541 kfree(eerb); 544 kfree(eerb);
545 unlock_kernel();
542 return -ENOMEM; 546 return -ENOMEM;
543 } 547 }
544 if (dasd_eer_allocate_buffer_pages(eerb->buffer, 548 if (dasd_eer_allocate_buffer_pages(eerb->buffer,
545 eerb->buffer_page_count)) { 549 eerb->buffer_page_count)) {
546 kfree(eerb->buffer); 550 kfree(eerb->buffer);
547 kfree(eerb); 551 kfree(eerb);
552 unlock_kernel();
548 return -ENOMEM; 553 return -ENOMEM;
549 } 554 }
550 filp->private_data = eerb; 555 filp->private_data = eerb;
@@ -552,6 +557,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
552 list_add(&eerb->list, &bufferlist); 557 list_add(&eerb->list, &bufferlist);
553 spin_unlock_irqrestore(&bufferlock, flags); 558 spin_unlock_irqrestore(&bufferlock, flags);
554 559
560 unlock_kernel();
555 return nonseekable_open(inp,filp); 561 return nonseekable_open(inp,filp);
556} 562}
557 563
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 116611583df8..aee4656127f7 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -222,7 +222,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
222 222
223 /* first of all check for state change pending interrupt */ 223 /* first of all check for state change pending interrupt */
224 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 224 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
225 if ((irb->scsw.dstat & mask) == mask) { 225 if ((irb->scsw.cmd.dstat & mask) == mask) {
226 dasd_generic_handle_state_change(device); 226 dasd_generic_handle_state_change(device);
227 return; 227 return;
228 } 228 }
@@ -449,11 +449,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
449 device->cdev->dev.bus_id); 449 device->cdev->dev.bus_id);
450 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 450 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
451 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 451 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
452 irb->scsw.cstat, irb->scsw.dstat); 452 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
453 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 453 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
454 " device %s: Failing CCW: %p\n", 454 " device %s: Failing CCW: %p\n",
455 device->cdev->dev.bus_id, 455 device->cdev->dev.bus_id,
456 (void *) (addr_t) irb->scsw.cpa); 456 (void *) (addr_t) irb->scsw.cmd.cpa);
457 if (irb->esw.esw0.erw.cons) { 457 if (irb->esw.esw0.erw.cons) {
458 for (sl = 0; sl < 4; sl++) { 458 for (sl = 0; sl < 4; sl++) {
459 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 459 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -498,11 +498,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
498 498
499 /* print failing CCW area */ 499 /* print failing CCW area */
500 len = 0; 500 len = 0;
501 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) { 501 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
502 act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2; 502 act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
503 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 503 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
504 } 504 }
505 end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last); 505 end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
506 while (act <= end) { 506 while (act <= end) {
507 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 507 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
508 " CCW %p: %08X %08X DAT:", 508 " CCW %p: %08X %08X DAT:",
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index bb52d2fbac18..01fcdd91b846 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
167 struct dcssblk_dev_info *dev_info; 167 struct dcssblk_dev_info *dev_info;
168 int rc; 168 int rc;
169 169
170 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { 170 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
171 PRINT_WARN("Invalid value, must be 0 or 1\n");
172 return -EINVAL; 171 return -EINVAL;
173 }
174 down_write(&dcssblk_devices_sem); 172 down_write(&dcssblk_devices_sem);
175 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 173 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
176 if (atomic_read(&dev_info->use_count)) { 174 if (atomic_read(&dev_info->use_count)) {
@@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
215 set_disk_ro(dev_info->gd, 0); 213 set_disk_ro(dev_info->gd, 0);
216 } 214 }
217 } else { 215 } else {
218 PRINT_WARN("Invalid value, must be 0 or 1\n");
219 rc = -EINVAL; 216 rc = -EINVAL;
220 goto out; 217 goto out;
221 } 218 }
@@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
258{ 255{
259 struct dcssblk_dev_info *dev_info; 256 struct dcssblk_dev_info *dev_info;
260 257
261 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { 258 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
262 PRINT_WARN("Invalid value, must be 0 or 1\n");
263 return -EINVAL; 259 return -EINVAL;
264 }
265 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 260 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
266 261
267 down_write(&dcssblk_devices_sem); 262 down_write(&dcssblk_devices_sem);
@@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
289 } 284 }
290 } else { 285 } else {
291 up_write(&dcssblk_devices_sem); 286 up_write(&dcssblk_devices_sem);
292 PRINT_WARN("Invalid value, must be 0 or 1\n");
293 return -EINVAL; 287 return -EINVAL;
294 } 288 }
295 up_write(&dcssblk_devices_sem); 289 up_write(&dcssblk_devices_sem);
@@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
441 goto out; 435 goto out;
442 436
443unregister_dev: 437unregister_dev:
444 PRINT_ERR("device_create_file() failed!\n");
445 list_del(&dev_info->lh); 438 list_del(&dev_info->lh);
446 blk_cleanup_queue(dev_info->dcssblk_queue); 439 blk_cleanup_queue(dev_info->dcssblk_queue);
447 dev_info->gd->queue = NULL; 440 dev_info->gd->queue = NULL;
@@ -702,10 +695,8 @@ dcssblk_check_params(void)
702static void __exit 695static void __exit
703dcssblk_exit(void) 696dcssblk_exit(void)
704{ 697{
705 PRINT_DEBUG("DCSSBLOCK EXIT...\n");
706 s390_root_dev_unregister(dcssblk_root_dev); 698 s390_root_dev_unregister(dcssblk_root_dev);
707 unregister_blkdev(dcssblk_major, DCSSBLK_NAME); 699 unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
708 PRINT_DEBUG("...finished!\n");
709} 700}
710 701
711static int __init 702static int __init
@@ -713,27 +704,21 @@ dcssblk_init(void)
713{ 704{
714 int rc; 705 int rc;
715 706
716 PRINT_DEBUG("DCSSBLOCK INIT...\n");
717 dcssblk_root_dev = s390_root_dev_register("dcssblk"); 707 dcssblk_root_dev = s390_root_dev_register("dcssblk");
718 if (IS_ERR(dcssblk_root_dev)) { 708 if (IS_ERR(dcssblk_root_dev))
719 PRINT_ERR("device_register() failed!\n");
720 return PTR_ERR(dcssblk_root_dev); 709 return PTR_ERR(dcssblk_root_dev);
721 }
722 rc = device_create_file(dcssblk_root_dev, &dev_attr_add); 710 rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
723 if (rc) { 711 if (rc) {
724 PRINT_ERR("device_create_file(add) failed!\n");
725 s390_root_dev_unregister(dcssblk_root_dev); 712 s390_root_dev_unregister(dcssblk_root_dev);
726 return rc; 713 return rc;
727 } 714 }
728 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); 715 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
729 if (rc) { 716 if (rc) {
730 PRINT_ERR("device_create_file(remove) failed!\n");
731 s390_root_dev_unregister(dcssblk_root_dev); 717 s390_root_dev_unregister(dcssblk_root_dev);
732 return rc; 718 return rc;
733 } 719 }
734 rc = register_blkdev(0, DCSSBLK_NAME); 720 rc = register_blkdev(0, DCSSBLK_NAME);
735 if (rc < 0) { 721 if (rc < 0) {
736 PRINT_ERR("Can't get dynamic major!\n");
737 s390_root_dev_unregister(dcssblk_root_dev); 722 s390_root_dev_unregister(dcssblk_root_dev);
738 return rc; 723 return rc;
739 } 724 }
@@ -742,7 +727,6 @@ dcssblk_init(void)
742 727
743 dcssblk_check_params(); 728 dcssblk_check_params();
744 729
745 PRINT_DEBUG("...finished!\n");
746 return 0; 730 return 0;
747} 731}
748 732
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index f231bc21b1ca..dd9b986389a2 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
100 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); 100 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
101 if (cc == 3) 101 if (cc == 3)
102 return -ENXIO; 102 return -ENXIO;
103 if (cc == 2) { 103 if (cc == 2)
104 PRINT_ERR("expanded storage lost!\n");
105 return -ENXIO; 104 return -ENXIO;
106 } 105 if (cc == 1)
107 if (cc == 1) {
108 PRINT_ERR("page in failed for page index %u.\n",
109 xpage_index);
110 return -EIO; 106 return -EIO;
111 }
112 return 0; 107 return 0;
113} 108}
114 109
@@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
135 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); 130 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
136 if (cc == 3) 131 if (cc == 3)
137 return -ENXIO; 132 return -ENXIO;
138 if (cc == 2) { 133 if (cc == 2)
139 PRINT_ERR("expanded storage lost!\n");
140 return -ENXIO; 134 return -ENXIO;
141 } 135 if (cc == 1)
142 if (cc == 1) {
143 PRINT_ERR("page out failed for page index %u.\n",
144 xpage_index);
145 return -EIO; 136 return -EIO;
146 }
147 return 0; 137 return 0;
148} 138}
149 139
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 3e5653c92f4b..d3ec9b55ab35 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -93,9 +93,6 @@ struct raw3215_info {
93 struct raw3215_req *queued_write;/* pointer to queued write requests */ 93 struct raw3215_req *queued_write;/* pointer to queued write requests */
94 wait_queue_head_t empty_wait; /* wait queue for flushing */ 94 wait_queue_head_t empty_wait; /* wait queue for flushing */
95 struct timer_list timer; /* timer for delayed output */ 95 struct timer_list timer; /* timer for delayed output */
96 char *message; /* pending message from raw3215_irq */
97 int msg_dstat; /* dstat for pending message */
98 int msg_cstat; /* cstat for pending message */
99 int line_pos; /* position on the line (for tabs) */ 96 int line_pos; /* position on the line (for tabs) */
100 char ubuffer[80]; /* copy_from_user buffer */ 97 char ubuffer[80]; /* copy_from_user buffer */
101}; 98};
@@ -359,11 +356,6 @@ raw3215_tasklet(void *data)
359 raw3215_mk_write_req(raw); 356 raw3215_mk_write_req(raw);
360 raw3215_try_io(raw); 357 raw3215_try_io(raw);
361 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 358 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
362 /* Check for pending message from raw3215_irq */
363 if (raw->message != NULL) {
364 printk(raw->message, raw->msg_dstat, raw->msg_cstat);
365 raw->message = NULL;
366 }
367 tty = raw->tty; 359 tty = raw->tty;
368 if (tty != NULL && 360 if (tty != NULL &&
369 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) { 361 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
@@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
381 struct raw3215_req *req; 373 struct raw3215_req *req;
382 struct tty_struct *tty; 374 struct tty_struct *tty;
383 int cstat, dstat; 375 int cstat, dstat;
384 int count, slen; 376 int count;
385 377
386 raw = cdev->dev.driver_data; 378 raw = cdev->dev.driver_data;
387 req = (struct raw3215_req *) intparm; 379 req = (struct raw3215_req *) intparm;
388 cstat = irb->scsw.cstat; 380 cstat = irb->scsw.cmd.cstat;
389 dstat = irb->scsw.dstat; 381 dstat = irb->scsw.cmd.dstat;
390 if (cstat != 0) { 382 if (cstat != 0)
391 raw->message = KERN_WARNING
392 "Got nonzero channel status in raw3215_irq "
393 "(dev sts 0x%2x, sch sts 0x%2x)";
394 raw->msg_dstat = dstat;
395 raw->msg_cstat = cstat;
396 tasklet_schedule(&raw->tasklet); 383 tasklet_schedule(&raw->tasklet);
397 }
398 if (dstat & 0x01) { /* we got a unit exception */ 384 if (dstat & 0x01) { /* we got a unit exception */
399 dstat &= ~0x01; /* we can ignore it */ 385 dstat &= ~0x01; /* we can ignore it */
400 } 386 }
@@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
404 break; 390 break;
405 /* Attention interrupt, someone hit the enter key */ 391 /* Attention interrupt, someone hit the enter key */
406 raw3215_mk_read_req(raw); 392 raw3215_mk_read_req(raw);
407 if (MACHINE_IS_P390)
408 memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
409 tasklet_schedule(&raw->tasklet); 393 tasklet_schedule(&raw->tasklet);
410 break; 394 break;
411 case 0x08: 395 case 0x08:
@@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
415 return; /* That shouldn't happen ... */ 399 return; /* That shouldn't happen ... */
416 if (req->type == RAW3215_READ) { 400 if (req->type == RAW3215_READ) {
417 /* store residual count, then wait for device end */ 401 /* store residual count, then wait for device end */
418 req->residual = irb->scsw.count; 402 req->residual = irb->scsw.cmd.count;
419 } 403 }
420 if (dstat == 0x08) 404 if (dstat == 0x08)
421 break; 405 break;
@@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
428 412
429 tty = raw->tty; 413 tty = raw->tty;
430 count = 160 - req->residual; 414 count = 160 - req->residual;
431 if (MACHINE_IS_P390) {
432 slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
433 if (count > slen)
434 count = slen;
435 } else
436 EBCASC(raw->inbuf, count); 415 EBCASC(raw->inbuf, count);
437 cchar = ctrlchar_handle(raw->inbuf, count, tty); 416 cchar = ctrlchar_handle(raw->inbuf, count, tty);
438 switch (cchar & CTRLCHAR_MASK) { 417 switch (cchar & CTRLCHAR_MASK) {
@@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
481 raw->flags &= ~RAW3215_WORKING; 460 raw->flags &= ~RAW3215_WORKING;
482 raw3215_free_req(req); 461 raw3215_free_req(req);
483 } 462 }
484 raw->message = KERN_WARNING
485 "Spurious interrupt in in raw3215_irq "
486 "(dev sts 0x%2x, sch sts 0x%2x)";
487 raw->msg_dstat = dstat;
488 raw->msg_cstat = cstat;
489 tasklet_schedule(&raw->tasklet); 463 tasklet_schedule(&raw->tasklet);
490 } 464 }
491 return; 465 return;
@@ -883,7 +857,6 @@ con3215_init(void)
883 free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); 857 free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
884 free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); 858 free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
885 raw3215[0] = NULL; 859 raw3215[0] = NULL;
886 printk("Couldn't find a 3215 console device\n");
887 return -ENODEV; 860 return -ENODEV;
888 } 861 }
889 register_console(&con3215); 862 register_console(&con3215);
@@ -1157,7 +1130,6 @@ tty3215_init(void)
1157 tty_set_operations(driver, &tty3215_ops); 1130 tty_set_operations(driver, &tty3215_ops);
1158 ret = tty_register_driver(driver); 1131 ret = tty_register_driver(driver);
1159 if (ret) { 1132 if (ret) {
1160 printk("Couldn't register tty3215 driver\n");
1161 put_tty_driver(driver); 1133 put_tty_driver(driver);
1162 return ret; 1134 return ret;
1163 } 1135 }
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 0b040557db02..3c07974886ed 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -411,15 +411,15 @@ static int
411con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) 411con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
412{ 412{
413 /* Handle ATTN. Schedule tasklet to read aid. */ 413 /* Handle ATTN. Schedule tasklet to read aid. */
414 if (irb->scsw.dstat & DEV_STAT_ATTENTION) 414 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
415 con3270_issue_read(cp); 415 con3270_issue_read(cp);
416 416
417 if (rq) { 417 if (rq) {
418 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 418 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
419 rq->rc = -EIO; 419 rq->rc = -EIO;
420 else 420 else
421 /* Normal end. Copy residual count. */ 421 /* Normal end. Copy residual count. */
422 rq->rescnt = irb->scsw.count; 422 rq->rescnt = irb->scsw.cmd.count;
423 } 423 }
424 return RAW3270_IO_DONE; 424 return RAW3270_IO_DONE;
425} 425}
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index ef36f2132aa4..d18e6d2e0b49 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -14,6 +14,7 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/smp_lock.h>
17 18
18#include <asm/ccwdev.h> 19#include <asm/ccwdev.h>
19#include <asm/cio.h> 20#include <asm/cio.h>
@@ -216,17 +217,17 @@ static int
216fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) 217fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
217{ 218{
218 /* Handle ATTN. Set indication and wake waiters for attention. */ 219 /* Handle ATTN. Set indication and wake waiters for attention. */
219 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 220 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
220 fp->attention = 1; 221 fp->attention = 1;
221 wake_up(&fp->wait); 222 wake_up(&fp->wait);
222 } 223 }
223 224
224 if (rq) { 225 if (rq) {
225 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 226 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
226 rq->rc = -EIO; 227 rq->rc = -EIO;
227 else 228 else
228 /* Normal end. Copy residual count. */ 229 /* Normal end. Copy residual count. */
229 rq->rescnt = irb->scsw.count; 230 rq->rescnt = irb->scsw.cmd.count;
230 } 231 }
231 return RAW3270_IO_DONE; 232 return RAW3270_IO_DONE;
232} 233}
@@ -421,6 +422,7 @@ fs3270_open(struct inode *inode, struct file *filp)
421 422
422 if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR) 423 if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
423 return -ENODEV; 424 return -ENODEV;
425 lock_kernel();
424 minor = iminor(filp->f_path.dentry->d_inode); 426 minor = iminor(filp->f_path.dentry->d_inode);
425 /* Check for minor 0 multiplexer. */ 427 /* Check for minor 0 multiplexer. */
426 if (minor == 0) { 428 if (minor == 0) {
@@ -429,7 +431,8 @@ fs3270_open(struct inode *inode, struct file *filp)
429 tty = get_current_tty(); 431 tty = get_current_tty();
430 if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) { 432 if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
431 mutex_unlock(&tty_mutex); 433 mutex_unlock(&tty_mutex);
432 return -ENODEV; 434 rc = -ENODEV;
435 goto out;
433 } 436 }
434 minor = tty->index + RAW3270_FIRSTMINOR; 437 minor = tty->index + RAW3270_FIRSTMINOR;
435 mutex_unlock(&tty_mutex); 438 mutex_unlock(&tty_mutex);
@@ -438,19 +441,22 @@ fs3270_open(struct inode *inode, struct file *filp)
438 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor); 441 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
439 if (!IS_ERR(fp)) { 442 if (!IS_ERR(fp)) {
440 raw3270_put_view(&fp->view); 443 raw3270_put_view(&fp->view);
441 return -EBUSY; 444 rc = -EBUSY;
445 goto out;
442 } 446 }
443 /* Allocate fullscreen view structure. */ 447 /* Allocate fullscreen view structure. */
444 fp = fs3270_alloc_view(); 448 fp = fs3270_alloc_view();
445 if (IS_ERR(fp)) 449 if (IS_ERR(fp)) {
446 return PTR_ERR(fp); 450 rc = PTR_ERR(fp);
451 goto out;
452 }
447 453
448 init_waitqueue_head(&fp->wait); 454 init_waitqueue_head(&fp->wait);
449 fp->fs_pid = get_pid(task_pid(current)); 455 fp->fs_pid = get_pid(task_pid(current));
450 rc = raw3270_add_view(&fp->view, &fs3270_fn, minor); 456 rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
451 if (rc) { 457 if (rc) {
452 fs3270_free_view(&fp->view); 458 fs3270_free_view(&fp->view);
453 return rc; 459 goto out;
454 } 460 }
455 461
456 /* Allocate idal-buffer. */ 462 /* Allocate idal-buffer. */
@@ -458,7 +464,8 @@ fs3270_open(struct inode *inode, struct file *filp)
458 if (IS_ERR(ib)) { 464 if (IS_ERR(ib)) {
459 raw3270_put_view(&fp->view); 465 raw3270_put_view(&fp->view);
460 raw3270_del_view(&fp->view); 466 raw3270_del_view(&fp->view);
461 return PTR_ERR(fp); 467 rc = PTR_ERR(fp);
468 goto out;
462 } 469 }
463 fp->rdbuf = ib; 470 fp->rdbuf = ib;
464 471
@@ -466,9 +473,11 @@ fs3270_open(struct inode *inode, struct file *filp)
466 if (rc) { 473 if (rc) {
467 raw3270_put_view(&fp->view); 474 raw3270_put_view(&fp->view);
468 raw3270_del_view(&fp->view); 475 raw3270_del_view(&fp->view);
469 return rc; 476 goto out;
470 } 477 }
471 filp->private_data = fp; 478 filp->private_data = fp;
479out:
480 unlock_kernel();
472 return 0; 481 return 0;
473} 482}
474 483
@@ -512,11 +521,8 @@ fs3270_init(void)
512 int rc; 521 int rc;
513 522
514 rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); 523 rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
515 if (rc) { 524 if (rc)
516 printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
517 IBM_FS3270_MAJOR, rc);
518 return rc; 525 return rc;
519 }
520 return 0; 526 return 0;
521} 527}
522 528
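A minimal sketch, not taken from this patch, of the lock_kernel()/unlock_kernel() pushdown that the fs3270_open() hunks above apply: the BKL is taken explicitly at the top of open(), and every early return becomes a goto to one exit label so the lock is dropped on all paths. Every name prefixed ex_, and the EX_MAX_DEVICES bound, are hypothetical:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>

#define EX_MAX_DEVICES 4

static struct ex_device { int in_use; } ex_devices[EX_MAX_DEVICES];

static int ex_open(struct inode *inode, struct file *filp)
{
	int minor, rc = 0;

	lock_kernel();		/* open() no longer gets the BKL implicitly */
	minor = iminor(inode);
	if (minor >= EX_MAX_DEVICES) {
		rc = -ENODEV;
		goto out;	/* error paths must still drop the BKL */
	}
	filp->private_data = &ex_devices[minor];
out:
	unlock_kernel();
	return rc;
}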
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 1e1f50655bbf..35fd8dfcaaa6 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -3,14 +3,14 @@
3 * 3 *
4 * Character device driver for reading z/VM *MONITOR service records. 4 * Character device driver for reading z/VM *MONITOR service records.
5 * 5 *
6 * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. 6 * Copyright IBM Corp. 2004, 2008
7 * 7 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
8 * Author: Gerald Schaefer <geraldsc@de.ibm.com>
9 */ 8 */
10 9
11#include <linux/module.h> 10#include <linux/module.h>
12#include <linux/moduleparam.h> 11#include <linux/moduleparam.h>
13#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/smp_lock.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -18,12 +18,11 @@
18#include <linux/ctype.h> 18#include <linux/ctype.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/poll.h>
22#include <net/iucv/iucv.h>
21#include <asm/uaccess.h> 23#include <asm/uaccess.h>
22#include <asm/ebcdic.h> 24#include <asm/ebcdic.h>
23#include <asm/extmem.h> 25#include <asm/extmem.h>
24#include <linux/poll.h>
25#include <net/iucv/iucv.h>
26
27 26
28//#define MON_DEBUG /* Debug messages on/off */ 27//#define MON_DEBUG /* Debug messages on/off */
29 28
@@ -152,10 +151,7 @@ static int mon_check_mca(struct mon_msg *monmsg)
152 (mon_mca_end(monmsg) > mon_dcss_end) || 151 (mon_mca_end(monmsg) > mon_dcss_end) ||
153 (mon_mca_start(monmsg) < mon_dcss_start) || 152 (mon_mca_start(monmsg) < mon_dcss_start) ||
154 ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0))) 153 ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
155 {
156 P_DEBUG("READ, IGNORED INVALID MCA\n\n");
157 return -EINVAL; 154 return -EINVAL;
158 }
159 return 0; 155 return 0;
160} 156}
161 157
@@ -164,10 +160,6 @@ static int mon_send_reply(struct mon_msg *monmsg,
164{ 160{
165 int rc; 161 int rc;
166 162
167 P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
168 "0x%08X\n\n",
169 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
170
171 rc = iucv_message_reply(monpriv->path, &monmsg->msg, 163 rc = iucv_message_reply(monpriv->path, &monmsg->msg,
172 IUCV_IPRMDATA, NULL, 0); 164 IUCV_IPRMDATA, NULL, 0);
173 atomic_dec(&monpriv->msglim_count); 165 atomic_dec(&monpriv->msglim_count);
@@ -202,15 +194,12 @@ static struct mon_private *mon_alloc_mem(void)
202 struct mon_private *monpriv; 194 struct mon_private *monpriv;
203 195
204 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); 196 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
205 if (!monpriv) { 197 if (!monpriv)
206 P_ERROR("no memory for monpriv\n");
207 return NULL; 198 return NULL;
208 }
209 for (i = 0; i < MON_MSGLIM; i++) { 199 for (i = 0; i < MON_MSGLIM; i++) {
210 monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg), 200 monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
211 GFP_KERNEL); 201 GFP_KERNEL);
212 if (!monpriv->msg_array[i]) { 202 if (!monpriv->msg_array[i]) {
213 P_ERROR("open, no memory for msg_array\n");
214 mon_free_mem(monpriv); 203 mon_free_mem(monpriv);
215 return NULL; 204 return NULL;
216 } 205 }
@@ -218,41 +207,10 @@ static struct mon_private *mon_alloc_mem(void)
218 return monpriv; 207 return monpriv;
219} 208}
220 209
221static inline void mon_read_debug(struct mon_msg *monmsg,
222 struct mon_private *monpriv)
223{
224#ifdef MON_DEBUG
225 u8 msg_type[2], mca_type;
226 unsigned long records_len;
227
228 records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
229
230 memcpy(msg_type, &monmsg->msg.class, 2);
231 EBCASC(msg_type, 2);
232 mca_type = mon_mca_type(monmsg, 0);
233 EBCASC(&mca_type, 1);
234
235 P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
236 monpriv->read_index, monpriv->write_index);
237 P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
238 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
239 P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
240 msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
241 mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
242 P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
243 mon_mca_start(monmsg), mon_mca_end(monmsg));
244 P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
245 mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
246 if (mon_mca_size(monmsg) > 12)
247 P_DEBUG("READ, MORE THAN ONE MCA\n\n");
248#endif
249}
250
251static inline void mon_next_mca(struct mon_msg *monmsg) 210static inline void mon_next_mca(struct mon_msg *monmsg)
252{ 211{
253 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) 212 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
254 return; 213 return;
255 P_DEBUG("READ, NEXT MCA\n\n");
256 monmsg->mca_offset += 12; 214 monmsg->mca_offset += 12;
257 monmsg->pos = 0; 215 monmsg->pos = 0;
258} 216}
@@ -269,7 +227,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv)
269 monmsg->msglim_reached = 0; 227 monmsg->msglim_reached = 0;
270 monmsg->pos = 0; 228 monmsg->pos = 0;
271 monmsg->mca_offset = 0; 229 monmsg->mca_offset = 0;
272 P_WARNING("read, message limit reached\n");
273 monpriv->read_index = (monpriv->read_index + 1) % 230 monpriv->read_index = (monpriv->read_index + 1) %
274 MON_MSGLIM; 231 MON_MSGLIM;
275 atomic_dec(&monpriv->read_ready); 232 atomic_dec(&monpriv->read_ready);
@@ -286,10 +243,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
286{ 243{
287 struct mon_private *monpriv = path->private; 244 struct mon_private *monpriv = path->private;
288 245
289 P_DEBUG("IUCV connection completed\n");
290 P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
291 "0x%02X, Sample = 0x%02X\n",
292 ipuser[0], ipuser[1], ipuser[2]);
293 atomic_set(&monpriv->iucv_connected, 1); 246 atomic_set(&monpriv->iucv_connected, 1);
294 wake_up(&mon_conn_wait_queue); 247 wake_up(&mon_conn_wait_queue);
295} 248}
@@ -310,7 +263,6 @@ static void mon_iucv_message_pending(struct iucv_path *path,
310{ 263{
311 struct mon_private *monpriv = path->private; 264 struct mon_private *monpriv = path->private;
312 265
313 P_DEBUG("IUCV message pending\n");
314 memcpy(&monpriv->msg_array[monpriv->write_index]->msg, 266 memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
315 msg, sizeof(*msg)); 267 msg, sizeof(*msg));
316 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { 268 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
@@ -340,6 +292,7 @@ static int mon_open(struct inode *inode, struct file *filp)
340 /* 292 /*
341 * only one user allowed 293 * only one user allowed
342 */ 294 */
295 lock_kernel();
343 rc = -EBUSY; 296 rc = -EBUSY;
344 if (test_and_set_bit(MON_IN_USE, &mon_in_use)) 297 if (test_and_set_bit(MON_IN_USE, &mon_in_use))
345 goto out; 298 goto out;
@@ -375,8 +328,8 @@ static int mon_open(struct inode *inode, struct file *filp)
375 rc = -EIO; 328 rc = -EIO;
376 goto out_path; 329 goto out_path;
377 } 330 }
378 P_INFO("open, established connection to *MONITOR service\n\n");
379 filp->private_data = monpriv; 331 filp->private_data = monpriv;
332 unlock_kernel();
380 return nonseekable_open(inode, filp); 333 return nonseekable_open(inode, filp);
381 334
382out_path: 335out_path:
@@ -386,6 +339,7 @@ out_priv:
386out_use: 339out_use:
387 clear_bit(MON_IN_USE, &mon_in_use); 340 clear_bit(MON_IN_USE, &mon_in_use);
388out: 341out:
342 unlock_kernel();
389 return rc; 343 return rc;
390} 344}
391 345
@@ -400,8 +354,6 @@ static int mon_close(struct inode *inode, struct file *filp)
400 rc = iucv_path_sever(monpriv->path, user_data_sever); 354 rc = iucv_path_sever(monpriv->path, user_data_sever);
401 if (rc) 355 if (rc)
402 P_ERROR("close, iucv_sever failed with rc = %i\n", rc); 356 P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
403 else
404 P_INFO("close, terminated connection to *MONITOR service\n");
405 357
406 atomic_set(&monpriv->iucv_severed, 0); 358 atomic_set(&monpriv->iucv_severed, 0);
407 atomic_set(&monpriv->iucv_connected, 0); 359 atomic_set(&monpriv->iucv_connected, 0);
@@ -442,10 +394,8 @@ static ssize_t mon_read(struct file *filp, char __user *data,
442 monmsg = monpriv->msg_array[monpriv->read_index]; 394 monmsg = monpriv->msg_array[monpriv->read_index];
443 } 395 }
444 396
445 if (!monmsg->pos) { 397 if (!monmsg->pos)
446 monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset; 398 monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
447 mon_read_debug(monmsg, monpriv);
448 }
449 if (mon_check_mca(monmsg)) 399 if (mon_check_mca(monmsg))
450 goto reply; 400 goto reply;
451 401
@@ -531,7 +481,6 @@ static int __init mon_init(void)
531 P_ERROR("failed to register with iucv driver\n"); 481 P_ERROR("failed to register with iucv driver\n");
532 return rc; 482 return rc;
533 } 483 }
534 P_INFO("open, registered with IUCV\n");
535 484
536 rc = segment_type(mon_dcss_name); 485 rc = segment_type(mon_dcss_name);
537 if (rc < 0) { 486 if (rc < 0) {
@@ -555,13 +504,8 @@ static int __init mon_init(void)
555 dcss_mkname(mon_dcss_name, &user_data_connect[8]); 504 dcss_mkname(mon_dcss_name, &user_data_connect[8]);
556 505
557 rc = misc_register(&mon_dev); 506 rc = misc_register(&mon_dev);
558 if (rc < 0 ) { 507 if (rc < 0 )
559 P_ERROR("misc_register failed, rc = %i\n", rc);
560 goto out; 508 goto out;
561 }
562 P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
563 mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
564 mon_dcss_end - mon_dcss_start + 1);
565 return 0; 509 return 0;
566 510
567out: 511out:
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index a86c0534cd49..4d71aa8c1a79 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -12,6 +12,7 @@
12#include <linux/moduleparam.h> 12#include <linux/moduleparam.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/smp_lock.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
@@ -179,10 +180,12 @@ static int monwrite_open(struct inode *inode, struct file *filp)
179 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); 180 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
180 if (!monpriv) 181 if (!monpriv)
181 return -ENOMEM; 182 return -ENOMEM;
183 lock_kernel();
182 INIT_LIST_HEAD(&monpriv->list); 184 INIT_LIST_HEAD(&monpriv->list);
183 monpriv->hdr_to_read = sizeof(monpriv->hdr); 185 monpriv->hdr_to_read = sizeof(monpriv->hdr);
184 mutex_init(&monpriv->thread_mutex); 186 mutex_init(&monpriv->thread_mutex);
185 filp->private_data = monpriv; 187 filp->private_data = monpriv;
188 unlock_kernel();
186 return nonseekable_open(inode, filp); 189 return nonseekable_open(inode, filp);
187} 190}
188 191
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 848ef7e8523f..81a96e019080 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
153 struct raw3270_request *rq; 153 struct raw3270_request *rq;
154 154
155 rq = alloc_bootmem_low(sizeof(struct raw3270)); 155 rq = alloc_bootmem_low(sizeof(struct raw3270));
156 if (!rq)
157 return ERR_PTR(-ENOMEM);
158 memset(rq, 0, sizeof(struct raw3270_request));
159 156
160 /* alloc output buffer. */ 157 /* alloc output buffer. */
161 if (size > 0) { 158 if (size > 0)
162 rq->buffer = alloc_bootmem_low(size); 159 rq->buffer = alloc_bootmem_low(size);
163 if (!rq->buffer) {
164 free_bootmem((unsigned long) rq,
165 sizeof(struct raw3270));
166 return ERR_PTR(-ENOMEM);
167 }
168 }
169 rq->size = size; 160 rq->size = size;
170 INIT_LIST_HEAD(&rq->list); 161 INIT_LIST_HEAD(&rq->list);
171 162
@@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
372 363
373 if (IS_ERR(irb)) 364 if (IS_ERR(irb))
374 rc = RAW3270_IO_RETRY; 365 rc = RAW3270_IO_RETRY;
375 else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { 366 else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
376 rq->rc = -EIO; 367 rq->rc = -EIO;
377 rc = RAW3270_IO_DONE; 368 rc = RAW3270_IO_DONE;
378 } else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | 369 } else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
379 DEV_STAT_UNIT_EXCEP)) { 370 DEV_STAT_UNIT_EXCEP)) {
380 /* Handle CE-DE-UE and subsequent UDE */ 371 /* Handle CE-DE-UE and subsequent UDE */
381 set_bit(RAW3270_FLAGS_BUSY, &rp->flags); 372 set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
382 rc = RAW3270_IO_BUSY; 373 rc = RAW3270_IO_BUSY;
383 } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) { 374 } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
384 /* Wait for UDE if busy flag is set. */ 375 /* Wait for UDE if busy flag is set. */
385 if (irb->scsw.dstat & DEV_STAT_DEV_END) { 376 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
386 clear_bit(RAW3270_FLAGS_BUSY, &rp->flags); 377 clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
387 /* Got it, now retry. */ 378 /* Got it, now retry. */
388 rc = RAW3270_IO_RETRY; 379 rc = RAW3270_IO_RETRY;
@@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
497 * Unit-Check Processing: 488 * Unit-Check Processing:
498 * Expect Command Reject or Intervention Required. 489 * Expect Command Reject or Intervention Required.
499 */ 490 */
500 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 491 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
501 /* Request finished abnormally. */ 492 /* Request finished abnormally. */
502 if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { 493 if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
503 set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); 494 set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
@@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
505 } 496 }
506 } 497 }
507 if (rq) { 498 if (rq) {
508 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 499 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
509 if (irb->ecw[0] & SNS0_CMD_REJECT) 500 if (irb->ecw[0] & SNS0_CMD_REJECT)
510 rq->rc = -EOPNOTSUPP; 501 rq->rc = -EOPNOTSUPP;
511 else 502 else
512 rq->rc = -EIO; 503 rq->rc = -EIO;
513 } else 504 } else
514 /* Request finished normally. Copy residual count. */ 505 /* Request finished normally. Copy residual count. */
515 rq->rescnt = irb->scsw.count; 506 rq->rescnt = irb->scsw.cmd.count;
516 } 507 }
517 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 508 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
518 set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); 509 set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
519 wake_up(&raw3270_wait_queue); 510 wake_up(&raw3270_wait_queue);
520 } 511 }
@@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp)
619 rp->cols = 132; 610 rp->cols = 132;
620 break; 611 break;
621 default: 612 default:
622 printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
623 rc = -EOPNOTSUPP; 613 rc = -EOPNOTSUPP;
624 break; 614 break;
625 } 615 }
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 2c7a1ee6b041..3c8b25e6c345 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
506 if (scbuf->validity_sclp_send_mask) 506 if (scbuf->validity_sclp_send_mask)
507 sclp_send_mask = scbuf->sclp_send_mask; 507 sclp_send_mask = scbuf->sclp_send_mask;
508 spin_unlock_irqrestore(&sclp_lock, flags); 508 spin_unlock_irqrestore(&sclp_lock, flags);
509 if (scbuf->validity_sclp_active_facility_mask)
510 sclp_facilities = scbuf->sclp_active_facility_mask;
509 sclp_dispatch_state_change(); 511 sclp_dispatch_state_change();
510} 512}
511 513
@@ -782,11 +784,9 @@ sclp_check_handler(__u16 code)
782 /* Is this the interrupt we are waiting for? */ 784 /* Is this the interrupt we are waiting for? */
783 if (finished_sccb == 0) 785 if (finished_sccb == 0)
784 return; 786 return;
785 if (finished_sccb != (u32) (addr_t) sclp_init_sccb) { 787 if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
786 printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt " 788 panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
787 "for buffer at 0x%x\n", finished_sccb); 789 finished_sccb);
788 return;
789 }
790 spin_lock(&sclp_lock); 790 spin_lock(&sclp_lock);
791 if (sclp_running_state == sclp_running_state_running) { 791 if (sclp_running_state == sclp_running_state_running) {
792 sclp_init_req.status = SCLP_REQ_DONE; 792 sclp_init_req.status = SCLP_REQ_DONE;
@@ -883,8 +883,6 @@ sclp_init(void)
883 unsigned long flags; 883 unsigned long flags;
884 int rc; 884 int rc;
885 885
886 if (!MACHINE_HAS_SCLP)
887 return -ENODEV;
888 spin_lock_irqsave(&sclp_lock, flags); 886 spin_lock_irqsave(&sclp_lock, flags);
889 /* Check for previous or running initialization */ 887 /* Check for previous or running initialization */
890 if (sclp_init_state != sclp_init_state_uninitialized) { 888 if (sclp_init_state != sclp_init_state_uninitialized) {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index b5c23396f8fe..0c2b77493db4 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -11,6 +11,9 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/mm.h>
15#include <linux/mmzone.h>
16#include <linux/memory.h>
14#include <asm/chpid.h> 17#include <asm/chpid.h>
15#include <asm/sclp.h> 18#include <asm/sclp.h>
16#include "sclp.h" 19#include "sclp.h"
@@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid;
43 46
44u64 sclp_facilities; 47u64 sclp_facilities;
45static u8 sclp_fac84; 48static u8 sclp_fac84;
49static unsigned long long rzm;
50static unsigned long long rnmax;
46 51
47static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) 52static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
48{ 53{
@@ -62,7 +67,7 @@ out:
62 return rc; 67 return rc;
63} 68}
64 69
65void __init sclp_read_info_early(void) 70static void __init sclp_read_info_early(void)
66{ 71{
67 int rc; 72 int rc;
68 int i; 73 int i;
@@ -92,34 +97,33 @@ void __init sclp_read_info_early(void)
92 97
93void __init sclp_facilities_detect(void) 98void __init sclp_facilities_detect(void)
94{ 99{
100 struct read_info_sccb *sccb;
101
102 sclp_read_info_early();
95 if (!early_read_info_sccb_valid) 103 if (!early_read_info_sccb_valid)
96 return; 104 return;
97 sclp_facilities = early_read_info_sccb.facilities; 105
98 sclp_fac84 = early_read_info_sccb.fac84; 106 sccb = &early_read_info_sccb;
107 sclp_facilities = sccb->facilities;
108 sclp_fac84 = sccb->fac84;
109 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
110 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
111 rzm <<= 20;
99} 112}
100 113
101unsigned long long __init sclp_memory_detect(void) 114unsigned long long sclp_get_rnmax(void)
102{ 115{
103 unsigned long long memsize; 116 return rnmax;
104 struct read_info_sccb *sccb; 117}
105 118
106 if (!early_read_info_sccb_valid) 119unsigned long long sclp_get_rzm(void)
107 return 0; 120{
108 sccb = &early_read_info_sccb; 121 return rzm;
109 if (sccb->rnsize)
110 memsize = sccb->rnsize << 20;
111 else
112 memsize = sccb->rnsize2 << 20;
113 if (sccb->rnmax)
114 memsize *= sccb->rnmax;
115 else
116 memsize *= sccb->rnmax2;
117 return memsize;
118} 122}
119 123
120/* 124/*
121 * This function will be called after sclp_memory_detect(), which gets called 125 * This function will be called after sclp_facilities_detect(), which gets
122 * early from early.c code. Therefore the sccb should have valid contents. 126 * called from early.c code. Therefore the sccb should have valid contents.
123 */ 127 */
124void __init sclp_get_ipl_info(struct sclp_ipl_info *info) 128void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
125{ 129{
@@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu)
278 return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); 282 return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
279} 283}
280 284
285#ifdef CONFIG_MEMORY_HOTPLUG
286
287static DEFINE_MUTEX(sclp_mem_mutex);
288static LIST_HEAD(sclp_mem_list);
289static u8 sclp_max_storage_id;
290static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
291
292struct memory_increment {
293 struct list_head list;
294 u16 rn;
295 int standby;
296 int usecount;
297};
298
299struct assign_storage_sccb {
300 struct sccb_header header;
301 u16 rn;
302} __packed;
303
304static unsigned long long rn2addr(u16 rn)
305{
306 return (unsigned long long) (rn - 1) * rzm;
307}
308
309static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
310{
311 struct assign_storage_sccb *sccb;
312 int rc;
313
314 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
315 if (!sccb)
316 return -ENOMEM;
317 sccb->header.length = PAGE_SIZE;
318 sccb->rn = rn;
319 rc = do_sync_request(cmd, sccb);
320 if (rc)
321 goto out;
322 switch (sccb->header.response_code) {
323 case 0x0020:
324 case 0x0120:
325 break;
326 default:
327 rc = -EIO;
328 break;
329 }
330out:
331 free_page((unsigned long) sccb);
332 return rc;
333}
334
335static int sclp_assign_storage(u16 rn)
336{
337 return do_assign_storage(0x000d0001, rn);
338}
339
340static int sclp_unassign_storage(u16 rn)
341{
342 return do_assign_storage(0x000c0001, rn);
343}
344
345struct attach_storage_sccb {
346 struct sccb_header header;
347 u16 :16;
348 u16 assigned;
349 u32 :32;
350 u32 entries[0];
351} __packed;
352
353static int sclp_attach_storage(u8 id)
354{
355 struct attach_storage_sccb *sccb;
356 int rc;
357 int i;
358
359 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
360 if (!sccb)
361 return -ENOMEM;
362 sccb->header.length = PAGE_SIZE;
363 rc = do_sync_request(0x00080001 | id << 8, sccb);
364 if (rc)
365 goto out;
366 switch (sccb->header.response_code) {
367 case 0x0020:
368 set_bit(id, sclp_storage_ids);
369 for (i = 0; i < sccb->assigned; i++)
370 sclp_unassign_storage(sccb->entries[i] >> 16);
371 break;
372 default:
373 rc = -EIO;
374 break;
375 }
376out:
377 free_page((unsigned long) sccb);
378 return rc;
379}
380
381static int sclp_mem_change_state(unsigned long start, unsigned long size,
382 int online)
383{
384 struct memory_increment *incr;
385 unsigned long long istart;
386 int rc = 0;
387
388 list_for_each_entry(incr, &sclp_mem_list, list) {
389 istart = rn2addr(incr->rn);
390 if (start + size - 1 < istart)
391 break;
392 if (start > istart + rzm - 1)
393 continue;
394 if (online) {
395 if (incr->usecount++)
396 continue;
397 /*
398 * Don't break the loop if one assign fails. Loop may
399 * be walked again on CANCEL and we can't save
400 * information if state changed before or not.
401 * So continue and increase usecount for all increments.
402 */
403 rc |= sclp_assign_storage(incr->rn);
404 } else {
405 if (--incr->usecount)
406 continue;
407 sclp_unassign_storage(incr->rn);
408 }
409 }
410 return rc ? -EIO : 0;
411}
412
413static int sclp_mem_notifier(struct notifier_block *nb,
414 unsigned long action, void *data)
415{
416 unsigned long start, size;
417 struct memory_notify *arg;
418 unsigned char id;
419 int rc = 0;
420
421 arg = data;
422 start = arg->start_pfn << PAGE_SHIFT;
423 size = arg->nr_pages << PAGE_SHIFT;
424 mutex_lock(&sclp_mem_mutex);
425 for (id = 0; id <= sclp_max_storage_id; id++)
426 if (!test_bit(id, sclp_storage_ids))
427 sclp_attach_storage(id);
428 switch (action) {
429 case MEM_ONLINE:
430 break;
431 case MEM_GOING_ONLINE:
432 rc = sclp_mem_change_state(start, size, 1);
433 break;
434 case MEM_CANCEL_ONLINE:
435 sclp_mem_change_state(start, size, 0);
436 break;
437 default:
438 rc = -EINVAL;
439 break;
440 }
441 mutex_unlock(&sclp_mem_mutex);
442 return rc ? NOTIFY_BAD : NOTIFY_OK;
443}
444
445static struct notifier_block sclp_mem_nb = {
446 .notifier_call = sclp_mem_notifier,
447};
448
449static void __init add_memory_merged(u16 rn)
450{
451 static u16 first_rn, num;
452 unsigned long long start, size;
453
454 if (rn && first_rn && (first_rn + num == rn)) {
455 num++;
456 return;
457 }
458 if (!first_rn)
459 goto skip_add;
460 start = rn2addr(first_rn);
461 size = (unsigned long long ) num * rzm;
462 if (start >= VMEM_MAX_PHYS)
463 goto skip_add;
464 if (start + size > VMEM_MAX_PHYS)
465 size = VMEM_MAX_PHYS - start;
466 add_memory(0, start, size);
467skip_add:
468 first_rn = rn;
469 num = 1;
470}
471
472static void __init sclp_add_standby_memory(void)
473{
474 struct memory_increment *incr;
475
476 list_for_each_entry(incr, &sclp_mem_list, list)
477 if (incr->standby)
478 add_memory_merged(incr->rn);
479 add_memory_merged(0);
480}
481
482static void __init insert_increment(u16 rn, int standby, int assigned)
483{
484 struct memory_increment *incr, *new_incr;
485 struct list_head *prev;
486 u16 last_rn;
487
488 new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
489 if (!new_incr)
490 return;
491 new_incr->rn = rn;
492 new_incr->standby = standby;
493 last_rn = 0;
494 prev = &sclp_mem_list;
495 list_for_each_entry(incr, &sclp_mem_list, list) {
496 if (assigned && incr->rn > rn)
497 break;
498 if (!assigned && incr->rn - last_rn > 1)
499 break;
500 last_rn = incr->rn;
501 prev = &incr->list;
502 }
503 if (!assigned)
504 new_incr->rn = last_rn + 1;
505 if (new_incr->rn > rnmax) {
506 kfree(new_incr);
507 return;
508 }
509 list_add(&new_incr->list, prev);
510}
511
512struct read_storage_sccb {
513 struct sccb_header header;
514 u16 max_id;
515 u16 assigned;
516 u16 standby;
517 u16 :16;
518 u32 entries[0];
519} __packed;
520
521static int __init sclp_detect_standby_memory(void)
522{
523 struct read_storage_sccb *sccb;
524 int i, id, assigned, rc;
525
526 if (!early_read_info_sccb_valid)
527 return 0;
528 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
529 return 0;
530 rc = -ENOMEM;
531 sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
532 if (!sccb)
533 goto out;
534 assigned = 0;
535 for (id = 0; id <= sclp_max_storage_id; id++) {
536 memset(sccb, 0, PAGE_SIZE);
537 sccb->header.length = PAGE_SIZE;
538 rc = do_sync_request(0x00040001 | id << 8, sccb);
539 if (rc)
540 goto out;
541 switch (sccb->header.response_code) {
542 case 0x0010:
543 set_bit(id, sclp_storage_ids);
544 for (i = 0; i < sccb->assigned; i++) {
545 if (!sccb->entries[i])
546 continue;
547 assigned++;
548 insert_increment(sccb->entries[i] >> 16, 0, 1);
549 }
550 break;
551 case 0x0310:
552 break;
553 case 0x0410:
554 for (i = 0; i < sccb->assigned; i++) {
555 if (!sccb->entries[i])
556 continue;
557 assigned++;
558 insert_increment(sccb->entries[i] >> 16, 1, 1);
559 }
560 break;
561 default:
562 rc = -EIO;
563 break;
564 }
565 if (!rc)
566 sclp_max_storage_id = sccb->max_id;
567 }
568 if (rc || list_empty(&sclp_mem_list))
569 goto out;
570 for (i = 1; i <= rnmax - assigned; i++)
571 insert_increment(0, 1, 0);
572 rc = register_memory_notifier(&sclp_mem_nb);
573 if (rc)
574 goto out;
575 sclp_add_standby_memory();
576out:
577 free_page((unsigned long) sccb);
578 return rc;
579}
580__initcall(sclp_detect_standby_memory);
581
582#endif /* CONFIG_MEMORY_HOTPLUG */
583
281/* 584/*
282 * Channel path configuration related functions. 585 * Channel path configuration related functions.
283 */ 586 */
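The new CONFIG_MEMORY_HOTPLUG block added to sclp_cmd.c above wires SCLP standby storage into the generic memory hotplug notifier chain. A condensed sketch of that notifier pattern, assuming CONFIG_MEMORY_HOTPLUG is set; ex_backend_set_online() is a hypothetical stand-in for sclp_mem_change_state():

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/notifier.h>

/* hypothetical stand-in for sclp_mem_change_state() */
static int ex_backend_set_online(unsigned long start, unsigned long size,
				 int online)
{
	return 0;
}

static int ex_mem_notifier(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	struct memory_notify *arg = data;
	unsigned long start = arg->start_pfn << PAGE_SHIFT;
	unsigned long size = arg->nr_pages << PAGE_SHIFT;
	int rc = 0;

	switch (action) {
	case MEM_GOING_ONLINE:	/* assign the range before it comes online */
		rc = ex_backend_set_online(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:	/* undo the assignment if onlining is aborted */
		ex_backend_set_online(start, size, 0);
		break;
	default:
		break;
	}
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block ex_mem_nb = {
	.notifier_call = ex_mem_notifier,
};

static int __init ex_mem_init(void)
{
	return register_memory_notifier(&ex_mem_nb);
}
__initcall(ex_mem_init);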
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index ead1043d788e..7e619c534bf4 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -14,14 +14,13 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/jiffies.h> 15#include <linux/jiffies.h>
16#include <linux/bootmem.h> 16#include <linux/bootmem.h>
17#include <linux/termios.h>
17#include <linux/err.h> 18#include <linux/err.h>
18 19
19#include "sclp.h" 20#include "sclp.h"
20#include "sclp_rw.h" 21#include "sclp_rw.h"
21#include "sclp_tty.h" 22#include "sclp_tty.h"
22 23
23#define SCLP_CON_PRINT_HEADER "sclp console driver: "
24
25#define sclp_console_major 4 /* TTYAUX_MAJOR */ 24#define sclp_console_major 4 /* TTYAUX_MAJOR */
26#define sclp_console_minor 64 25#define sclp_console_minor 64
27#define sclp_console_name "ttyS" 26#define sclp_console_name "ttyS"
@@ -222,8 +221,6 @@ sclp_console_init(void)
222 INIT_LIST_HEAD(&sclp_con_pages); 221 INIT_LIST_HEAD(&sclp_con_pages);
223 for (i = 0; i < MAX_CONSOLE_PAGES; i++) { 222 for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
224 page = alloc_bootmem_low_pages(PAGE_SIZE); 223 page = alloc_bootmem_low_pages(PAGE_SIZE);
225 if (page == NULL)
226 return -ENOMEM;
227 list_add_tail((struct list_head *) page, &sclp_con_pages); 224 list_add_tail((struct list_head *) page, &sclp_con_pages);
228 } 225 }
229 INIT_LIST_HEAD(&sclp_con_outqueue); 226 INIT_LIST_HEAD(&sclp_con_outqueue);
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index ad05a87bc480..fff4ff485d9b 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -8,6 +8,7 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11#include <linux/kthread.h>
11#include <linux/sysdev.h> 12#include <linux/sysdev.h>
12#include <linux/workqueue.h> 13#include <linux/workqueue.h>
13#include <asm/smp.h> 14#include <asm/smp.h>
@@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
40 put_online_cpus(); 41 put_online_cpus();
41} 42}
42 43
43static void __ref sclp_cpu_change_notify(struct work_struct *work) 44static int sclp_cpu_kthread(void *data)
44{ 45{
45 smp_rescan_cpus(); 46 smp_rescan_cpus();
47 return 0;
48}
49
50static void __ref sclp_cpu_change_notify(struct work_struct *work)
51{
52 /* Can't call smp_rescan_cpus() from workqueue context since it may
53 * deadlock in case of cpu hotplug. So we have to create a kernel
54 * thread in order to call it.
55 */
56 kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan");
46} 57}
47 58
48static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) 59static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
@@ -74,10 +85,8 @@ static int __init sclp_conf_init(void)
74 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); 85 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
75 86
76 rc = sclp_register(&sclp_conf_register); 87 rc = sclp_register(&sclp_conf_register);
77 if (rc) { 88 if (rc)
78 printk(KERN_ERR TAG "failed to register (%d).\n", rc);
79 return rc; 89 return rc;
80 }
81 90
82 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { 91 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
83 printk(KERN_WARNING TAG "no configuration management.\n"); 92 printk(KERN_WARNING TAG "no configuration management.\n");
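The sclp_config.c change above moves smp_rescan_cpus() out of the workqueue and into a short-lived kernel thread, because the rescan can block on cpu hotplug locks and deadlock when run from workqueue context. A bare-bones sketch of that hand-off; ex_rescan() is a hypothetical stand-in for the blocking operation:

#include <linux/kthread.h>
#include <linux/workqueue.h>

/* hypothetical stand-in for smp_rescan_cpus() */
static int ex_rescan(void *data)
{
	/* may sleep on cpu hotplug locks, so it must not run in a workqueue */
	return 0;
}

static void ex_change_notify(struct work_struct *work)
{
	/* fire and forget: the thread exits once the rescan is done */
	kthread_run(ex_rescan, NULL, "ex_rescan");
}

static DECLARE_WORK(ex_change_work, ex_change_notify);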
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 9f37456222e9..d887bd261d28 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -27,6 +27,8 @@
27#define CPI_LENGTH_NAME 8 27#define CPI_LENGTH_NAME 8
28#define CPI_LENGTH_LEVEL 16 28#define CPI_LENGTH_LEVEL 16
29 29
30static DEFINE_MUTEX(sclp_cpi_mutex);
31
30struct cpi_evbuf { 32struct cpi_evbuf {
31 struct evbuf_header header; 33 struct evbuf_header header;
32 u8 id_format; 34 u8 id_format;
@@ -124,21 +126,15 @@ static int cpi_req(void)
124 int response; 126 int response;
125 127
126 rc = sclp_register(&sclp_cpi_event); 128 rc = sclp_register(&sclp_cpi_event);
127 if (rc) { 129 if (rc)
128 printk(KERN_WARNING "cpi: could not register "
129 "to hardware console.\n");
130 goto out; 130 goto out;
131 }
132 if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { 131 if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
133 printk(KERN_WARNING "cpi: no control program "
134 "identification support\n");
135 rc = -EOPNOTSUPP; 132 rc = -EOPNOTSUPP;
136 goto out_unregister; 133 goto out_unregister;
137 } 134 }
138 135
139 req = cpi_prepare_req(); 136 req = cpi_prepare_req();
140 if (IS_ERR(req)) { 137 if (IS_ERR(req)) {
141 printk(KERN_WARNING "cpi: could not allocate request\n");
142 rc = PTR_ERR(req); 138 rc = PTR_ERR(req);
143 goto out_unregister; 139 goto out_unregister;
144 } 140 }
@@ -148,10 +144,8 @@ static int cpi_req(void)
148 144
149 /* Add request to sclp queue */ 145 /* Add request to sclp queue */
150 rc = sclp_add_request(req); 146 rc = sclp_add_request(req);
151 if (rc) { 147 if (rc)
152 printk(KERN_WARNING "cpi: could not start request\n");
153 goto out_free_req; 148 goto out_free_req;
154 }
155 149
156 wait_for_completion(&completion); 150 wait_for_completion(&completion);
157 151
@@ -223,7 +217,12 @@ static void set_string(char *attr, const char *value)
223static ssize_t system_name_show(struct kobject *kobj, 217static ssize_t system_name_show(struct kobject *kobj,
224 struct kobj_attribute *attr, char *page) 218 struct kobj_attribute *attr, char *page)
225{ 219{
226 return snprintf(page, PAGE_SIZE, "%s\n", system_name); 220 int rc;
221
222 mutex_lock(&sclp_cpi_mutex);
223 rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
224 mutex_unlock(&sclp_cpi_mutex);
225 return rc;
227} 226}
228 227
229static ssize_t system_name_store(struct kobject *kobj, 228static ssize_t system_name_store(struct kobject *kobj,
@@ -237,7 +236,9 @@ static ssize_t system_name_store(struct kobject *kobj,
237 if (rc) 236 if (rc)
238 return rc; 237 return rc;
239 238
239 mutex_lock(&sclp_cpi_mutex);
240 set_string(system_name, buf); 240 set_string(system_name, buf);
241 mutex_unlock(&sclp_cpi_mutex);
241 242
242 return len; 243 return len;
243} 244}
@@ -248,7 +249,12 @@ static struct kobj_attribute system_name_attr =
248static ssize_t sysplex_name_show(struct kobject *kobj, 249static ssize_t sysplex_name_show(struct kobject *kobj,
249 struct kobj_attribute *attr, char *page) 250 struct kobj_attribute *attr, char *page)
250{ 251{
251 return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); 252 int rc;
253
254 mutex_lock(&sclp_cpi_mutex);
255 rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
256 mutex_unlock(&sclp_cpi_mutex);
257 return rc;
252} 258}
253 259
254static ssize_t sysplex_name_store(struct kobject *kobj, 260static ssize_t sysplex_name_store(struct kobject *kobj,
@@ -262,7 +268,9 @@ static ssize_t sysplex_name_store(struct kobject *kobj,
262 if (rc) 268 if (rc)
263 return rc; 269 return rc;
264 270
271 mutex_lock(&sclp_cpi_mutex);
265 set_string(sysplex_name, buf); 272 set_string(sysplex_name, buf);
273 mutex_unlock(&sclp_cpi_mutex);
266 274
267 return len; 275 return len;
268} 276}
@@ -273,7 +281,12 @@ static struct kobj_attribute sysplex_name_attr =
273static ssize_t system_type_show(struct kobject *kobj, 281static ssize_t system_type_show(struct kobject *kobj,
274 struct kobj_attribute *attr, char *page) 282 struct kobj_attribute *attr, char *page)
275{ 283{
276 return snprintf(page, PAGE_SIZE, "%s\n", system_type); 284 int rc;
285
286 mutex_lock(&sclp_cpi_mutex);
287 rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
288 mutex_unlock(&sclp_cpi_mutex);
289 return rc;
277} 290}
278 291
279static ssize_t system_type_store(struct kobject *kobj, 292static ssize_t system_type_store(struct kobject *kobj,
@@ -287,7 +300,9 @@ static ssize_t system_type_store(struct kobject *kobj,
287 if (rc) 300 if (rc)
288 return rc; 301 return rc;
289 302
303 mutex_lock(&sclp_cpi_mutex);
290 set_string(system_type, buf); 304 set_string(system_type, buf);
305 mutex_unlock(&sclp_cpi_mutex);
291 306
292 return len; 307 return len;
293} 308}
@@ -298,8 +313,11 @@ static struct kobj_attribute system_type_attr =
298static ssize_t system_level_show(struct kobject *kobj, 313static ssize_t system_level_show(struct kobject *kobj,
299 struct kobj_attribute *attr, char *page) 314 struct kobj_attribute *attr, char *page)
300{ 315{
301 unsigned long long level = system_level; 316 unsigned long long level;
302 317
318 mutex_lock(&sclp_cpi_mutex);
319 level = system_level;
320 mutex_unlock(&sclp_cpi_mutex);
303 return snprintf(page, PAGE_SIZE, "%#018llx\n", level); 321 return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
304} 322}
305 323
@@ -320,8 +338,9 @@ static ssize_t system_level_store(struct kobject *kobj,
320 if (*endp) 338 if (*endp)
321 return -EINVAL; 339 return -EINVAL;
322 340
341 mutex_lock(&sclp_cpi_mutex);
323 system_level = level; 342 system_level = level;
324 343 mutex_unlock(&sclp_cpi_mutex);
325 return len; 344 return len;
326} 345}
327 346
@@ -334,7 +353,9 @@ static ssize_t set_store(struct kobject *kobj,
334{ 353{
335 int rc; 354 int rc;
336 355
356 mutex_lock(&sclp_cpi_mutex);
337 rc = cpi_req(); 357 rc = cpi_req();
358 mutex_unlock(&sclp_cpi_mutex);
338 if (rc) 359 if (rc)
339 return rc; 360 return rc;
340 361
@@ -373,12 +394,16 @@ int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
373 if (rc) 394 if (rc)
374 return rc; 395 return rc;
375 396
397 mutex_lock(&sclp_cpi_mutex);
376 set_string(system_name, system); 398 set_string(system_name, system);
377 set_string(sysplex_name, sysplex); 399 set_string(sysplex_name, sysplex);
378 set_string(system_type, type); 400 set_string(system_type, type);
379 system_level = level; 401 system_level = level;
380 402
381 return cpi_req(); 403 rc = cpi_req();
404 mutex_unlock(&sclp_cpi_mutex);
405
406 return rc;
382} 407}
383EXPORT_SYMBOL(sclp_cpi_set_data); 408EXPORT_SYMBOL(sclp_cpi_set_data);
384 409
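The sclp_cpi_sys.c hunks above introduce sclp_cpi_mutex so that the sysfs readers and writers of the CPI strings cannot race each other or a concurrent cpi_req(). A reduced sketch of that locking pattern around a single kobj_attribute pair; the ex_ names and the 8-character buffer are assumptions, not part of the patch:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static DEFINE_MUTEX(ex_mutex);
static char ex_name[9];		/* hypothetical 8-character attribute value */

static ssize_t ex_name_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *page)
{
	int rc;

	mutex_lock(&ex_mutex);		/* snapshot a consistent value */
	rc = snprintf(page, PAGE_SIZE, "%s\n", ex_name);
	mutex_unlock(&ex_mutex);
	return rc;
}

static ssize_t ex_name_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t len)
{
	mutex_lock(&ex_mutex);		/* writers serialize against show() */
	strlcpy(ex_name, buf, sizeof(ex_name));
	mutex_unlock(&ex_mutex);
	return len;
}

static struct kobj_attribute ex_name_attr =
	__ATTR(ex_name, 0644, ex_name_show, ex_name_store);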
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 45ff25e787cb..84c191c1cd62 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -51,13 +51,7 @@ static struct sclp_register sclp_quiesce_event = {
51static int __init 51static int __init
52sclp_quiesce_init(void) 52sclp_quiesce_init(void)
53{ 53{
54 int rc; 54 return sclp_register(&sclp_quiesce_event);
55
56 rc = sclp_register(&sclp_quiesce_event);
57 if (rc)
58 printk(KERN_WARNING "sclp: could not register quiesce handler "
59 "(rc=%d)\n", rc);
60 return rc;
61} 55}
62 56
63module_init(sclp_quiesce_init); 57module_init(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index da09781b32f7..710af42603f8 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -19,8 +19,6 @@
19#include "sclp.h" 19#include "sclp.h"
20#include "sclp_rw.h" 20#include "sclp_rw.h"
21 21
22#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
23
24/* 22/*
25 * The room for the SCCB (only for writing) is not equal to a pages size 23 * The room for the SCCB (only for writing) is not equal to a pages size
26 * (as it is specified as the maximum size in the SCLP documentation) 24 * (as it is specified as the maximum size in the SCLP documentation)
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 1c064976b32b..8b854857ba07 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -239,10 +239,8 @@ int __init sclp_sdias_init(void)
239 debug_register_view(sdias_dbf, &debug_sprintf_view); 239 debug_register_view(sdias_dbf, &debug_sprintf_view);
240 debug_set_level(sdias_dbf, 6); 240 debug_set_level(sdias_dbf, 6);
241 rc = sclp_register(&sclp_sdias_register); 241 rc = sclp_register(&sclp_sdias_register);
242 if (rc) { 242 if (rc)
243 ERROR_MSG("sclp register failed\n");
244 return rc; 243 return rc;
245 }
246 init_waitqueue_head(&sdias_wq); 244 init_waitqueue_head(&sdias_wq);
247 TRACE("init done\n"); 245 TRACE("init done\n");
248 return 0; 246 return 0;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 40b11521cd20..434ba04b1309 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -13,7 +13,6 @@
13#include <linux/tty.h> 13#include <linux/tty.h>
14#include <linux/tty_driver.h> 14#include <linux/tty_driver.h>
15#include <linux/tty_flip.h> 15#include <linux/tty_flip.h>
16#include <linux/wait.h>
17#include <linux/slab.h> 16#include <linux/slab.h>
18#include <linux/err.h> 17#include <linux/err.h>
19#include <linux/init.h> 18#include <linux/init.h>
@@ -25,8 +24,6 @@
25#include "sclp_rw.h" 24#include "sclp_rw.h"
26#include "sclp_tty.h" 25#include "sclp_tty.h"
27 26
28#define SCLP_TTY_PRINT_HEADER "sclp tty driver: "
29
30/* 27/*
31 * size of a buffer that collects single characters coming in 28 * size of a buffer that collects single characters coming in
32 * via sclp_tty_put_char() 29 * via sclp_tty_put_char()
@@ -50,8 +47,6 @@ static int sclp_tty_buffer_count;
50static struct sclp_buffer *sclp_ttybuf; 47static struct sclp_buffer *sclp_ttybuf;
51/* Timer for delayed output of console messages. */ 48/* Timer for delayed output of console messages. */
52static struct timer_list sclp_tty_timer; 49static struct timer_list sclp_tty_timer;
53/* Waitqueue to wait for buffers to get empty. */
54static wait_queue_head_t sclp_tty_waitq;
55 50
56static struct tty_struct *sclp_tty; 51static struct tty_struct *sclp_tty;
57static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; 52static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
@@ -59,19 +54,11 @@ static unsigned short int sclp_tty_chars_count;
59 54
60struct tty_driver *sclp_tty_driver; 55struct tty_driver *sclp_tty_driver;
61 56
62static struct sclp_ioctls sclp_ioctls; 57static int sclp_tty_tolower;
63static struct sclp_ioctls sclp_ioctls_init = 58static int sclp_tty_columns = 80;
64{ 59
65 8, /* 1 hor. tab. = 8 spaces */ 60#define SPACES_PER_TAB 8
66 0, /* no echo of input by this driver */ 61#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
67 80, /* 80 characters/line */
68 1, /* write after 1/10 s without final new line */
69 MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */
70 MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */
71 0, /* do not convert to lower case */
72 0x6c /* to seprate upper and lower case */
73 /* ('%' in EBCDIC) */
74};
75 62
76/* This routine is called whenever we try to open a SCLP terminal. */ 63/* This routine is called whenever we try to open a SCLP terminal. */
77static int 64static int
@@ -92,136 +79,6 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp)
92 sclp_tty = NULL; 79 sclp_tty = NULL;
93} 80}
94 81
95/* execute commands to control the i/o behaviour of the SCLP tty at runtime */
96static int
97sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
98 unsigned int cmd, unsigned long arg)
99{
100 unsigned long flags;
101 unsigned int obuf;
102 int check;
103 int rc;
104
105 if (tty->flags & (1 << TTY_IO_ERROR))
106 return -EIO;
107 rc = 0;
108 check = 0;
109 switch (cmd) {
110 case TIOCSCLPSHTAB:
111 /* set width of horizontal tab */
112 if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg))
113 rc = -EFAULT;
114 else
115 check = 1;
116 break;
117 case TIOCSCLPGHTAB:
118 /* get width of horizontal tab */
119 if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg))
120 rc = -EFAULT;
121 break;
122 case TIOCSCLPSECHO:
123 /* enable/disable echo of input */
124 if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg))
125 rc = -EFAULT;
126 break;
127 case TIOCSCLPGECHO:
128 /* Is echo of input enabled ? */
129 if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg))
130 rc = -EFAULT;
131 break;
132 case TIOCSCLPSCOLS:
133 /* set number of columns for output */
134 if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg))
135 rc = -EFAULT;
136 else
137 check = 1;
138 break;
139 case TIOCSCLPGCOLS:
140 /* get number of columns for output */
141 if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg))
142 rc = -EFAULT;
143 break;
144 case TIOCSCLPSNL:
145 /* enable/disable writing without final new line character */
146 if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg))
147 rc = -EFAULT;
148 break;
149 case TIOCSCLPGNL:
150 /* Is writing without final new line character enabled ? */
151 if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg))
152 rc = -EFAULT;
153 break;
154 case TIOCSCLPSOBUF:
155 /*
156 * set the maximum buffers size for output, will be rounded
157 * up to next 4kB boundary and stored as number of SCCBs
158 * (4kB Buffers) limitation: 256 x 4kB
159 */
160 if (get_user(obuf, (unsigned int __user *) arg) == 0) {
161 if (obuf & 0xFFF)
162 sclp_ioctls.max_sccb = (obuf >> 12) + 1;
163 else
164 sclp_ioctls.max_sccb = (obuf >> 12);
165 } else
166 rc = -EFAULT;
167 break;
168 case TIOCSCLPGOBUF:
169 /* get the maximum buffers size for output */
170 obuf = sclp_ioctls.max_sccb << 12;
171 if (put_user(obuf, (unsigned int __user *) arg))
172 rc = -EFAULT;
173 break;
174 case TIOCSCLPGKBUF:
175 /* get the number of buffers got from kernel at startup */
176 if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg))
177 rc = -EFAULT;
178 break;
179 case TIOCSCLPSCASE:
180 /* enable/disable conversion from upper to lower case */
181 if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
182 rc = -EFAULT;
183 break;
184 case TIOCSCLPGCASE:
185 /* Is conversion from upper to lower case of input enabled? */
186 if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
187 rc = -EFAULT;
188 break;
189 case TIOCSCLPSDELIM:
190 /*
191 * set special character used for separating upper and
192 * lower case, 0x00 disables this feature
193 */
194 if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg))
195 rc = -EFAULT;
196 break;
197 case TIOCSCLPGDELIM:
198 /*
199 * get special character used for separating upper and
200 * lower case, 0x00 disables this feature
201 */
202 if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg))
203 rc = -EFAULT;
204 break;
205 case TIOCSCLPSINIT:
206 /* set initial (default) sclp ioctls */
207 sclp_ioctls = sclp_ioctls_init;
208 check = 1;
209 break;
210 default:
211 rc = -ENOIOCTLCMD;
212 break;
213 }
214 if (check) {
215 spin_lock_irqsave(&sclp_tty_lock, flags);
216 if (sclp_ttybuf != NULL) {
217 sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab);
218 sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns);
219 }
220 spin_unlock_irqrestore(&sclp_tty_lock, flags);
221 }
222 return rc;
223}
224
225/* 82/*
226 * This routine returns the numbers of characters the tty driver 83 * This routine returns the numbers of characters the tty driver
227 * will accept for queuing to be written. This number is subject 84 * will accept for queuing to be written. This number is subject
@@ -268,7 +125,6 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
268 struct sclp_buffer, list); 125 struct sclp_buffer, list);
269 spin_unlock_irqrestore(&sclp_tty_lock, flags); 126 spin_unlock_irqrestore(&sclp_tty_lock, flags);
270 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); 127 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
271 wake_up(&sclp_tty_waitq);
272 /* check if the tty needs a wake up call */ 128 /* check if the tty needs a wake up call */
273 if (sclp_tty != NULL) { 129 if (sclp_tty != NULL) {
274 tty_wakeup(sclp_tty); 130 tty_wakeup(sclp_tty);
@@ -316,37 +172,37 @@ sclp_tty_timeout(unsigned long data)
316/* 172/*
317 * Write a string to the sclp tty. 173 * Write a string to the sclp tty.
318 */ 174 */
319static void 175static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
320sclp_tty_write_string(const unsigned char *str, int count)
321{ 176{
322 unsigned long flags; 177 unsigned long flags;
323 void *page; 178 void *page;
324 int written; 179 int written;
180 int overall_written;
325 struct sclp_buffer *buf; 181 struct sclp_buffer *buf;
326 182
327 if (count <= 0) 183 if (count <= 0)
328 return; 184 return 0;
185 overall_written = 0;
329 spin_lock_irqsave(&sclp_tty_lock, flags); 186 spin_lock_irqsave(&sclp_tty_lock, flags);
330 do { 187 do {
331 /* Create a sclp output buffer if none exists yet */ 188 /* Create a sclp output buffer if none exists yet */
332 if (sclp_ttybuf == NULL) { 189 if (sclp_ttybuf == NULL) {
333 while (list_empty(&sclp_tty_pages)) { 190 while (list_empty(&sclp_tty_pages)) {
334 spin_unlock_irqrestore(&sclp_tty_lock, flags); 191 spin_unlock_irqrestore(&sclp_tty_lock, flags);
335 if (in_interrupt()) 192 if (may_fail)
336 sclp_sync_wait(); 193 goto out;
337 else 194 else
338 wait_event(sclp_tty_waitq, 195 sclp_sync_wait();
339 !list_empty(&sclp_tty_pages));
340 spin_lock_irqsave(&sclp_tty_lock, flags); 196 spin_lock_irqsave(&sclp_tty_lock, flags);
341 } 197 }
342 page = sclp_tty_pages.next; 198 page = sclp_tty_pages.next;
343 list_del((struct list_head *) page); 199 list_del((struct list_head *) page);
344 sclp_ttybuf = sclp_make_buffer(page, 200 sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
345 sclp_ioctls.columns, 201 SPACES_PER_TAB);
346 sclp_ioctls.htab);
347 } 202 }
348 /* try to write the string to the current output buffer */ 203 /* try to write the string to the current output buffer */
349 written = sclp_write(sclp_ttybuf, str, count); 204 written = sclp_write(sclp_ttybuf, str, count);
205 overall_written += written;
350 if (written == count) 206 if (written == count)
351 break; 207 break;
352 /* 208 /*
@@ -363,27 +219,17 @@ sclp_tty_write_string(const unsigned char *str, int count)
363 count -= written; 219 count -= written;
364 } while (count > 0); 220 } while (count > 0);
365 /* Setup timer to output current console buffer after 1/10 second */ 221 /* Setup timer to output current console buffer after 1/10 second */
366 if (sclp_ioctls.final_nl) { 222 if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
367 if (sclp_ttybuf != NULL && 223 !timer_pending(&sclp_tty_timer)) {
368 sclp_chars_in_buffer(sclp_ttybuf) != 0 && 224 init_timer(&sclp_tty_timer);
369 !timer_pending(&sclp_tty_timer)) { 225 sclp_tty_timer.function = sclp_tty_timeout;
370 init_timer(&sclp_tty_timer); 226 sclp_tty_timer.data = 0UL;
371 sclp_tty_timer.function = sclp_tty_timeout; 227 sclp_tty_timer.expires = jiffies + HZ/10;
372 sclp_tty_timer.data = 0UL; 228 add_timer(&sclp_tty_timer);
373 sclp_tty_timer.expires = jiffies + HZ/10;
374 add_timer(&sclp_tty_timer);
375 }
376 } else {
377 if (sclp_ttybuf != NULL &&
378 sclp_chars_in_buffer(sclp_ttybuf) != 0) {
379 buf = sclp_ttybuf;
380 sclp_ttybuf = NULL;
381 spin_unlock_irqrestore(&sclp_tty_lock, flags);
382 __sclp_ttybuf_emit(buf);
383 spin_lock_irqsave(&sclp_tty_lock, flags);
384 }
385 } 229 }
386 spin_unlock_irqrestore(&sclp_tty_lock, flags); 230 spin_unlock_irqrestore(&sclp_tty_lock, flags);
231out:
232 return overall_written;
387} 233}
388 234
389/* 235/*
@@ -395,11 +241,10 @@ static int
395sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) 241sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
396{ 242{
397 if (sclp_tty_chars_count > 0) { 243 if (sclp_tty_chars_count > 0) {
398 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 244 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
399 sclp_tty_chars_count = 0; 245 sclp_tty_chars_count = 0;
400 } 246 }
401 sclp_tty_write_string(buf, count); 247 return sclp_tty_write_string(buf, count, 1);
402 return count;
403} 248}
404 249
405/* 250/*
@@ -417,9 +262,10 @@ sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
417{ 262{
418 sclp_tty_chars[sclp_tty_chars_count++] = ch; 263 sclp_tty_chars[sclp_tty_chars_count++] = ch;
419 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { 264 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
420 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 265 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
421 sclp_tty_chars_count = 0; 266 sclp_tty_chars_count = 0;
422 } return 1; 267 }
268 return 1;
423} 269}
424 270
425/* 271/*
@@ -430,7 +276,7 @@ static void
430sclp_tty_flush_chars(struct tty_struct *tty) 276sclp_tty_flush_chars(struct tty_struct *tty)
431{ 277{
432 if (sclp_tty_chars_count > 0) { 278 if (sclp_tty_chars_count > 0) {
433 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 279 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
434 sclp_tty_chars_count = 0; 280 sclp_tty_chars_count = 0;
435 } 281 }
436} 282}
@@ -469,7 +315,7 @@ static void
469sclp_tty_flush_buffer(struct tty_struct *tty) 315sclp_tty_flush_buffer(struct tty_struct *tty)
470{ 316{
471 if (sclp_tty_chars_count > 0) { 317 if (sclp_tty_chars_count > 0) {
472 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 318 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
473 sclp_tty_chars_count = 0; 319 sclp_tty_chars_count = 0;
474 } 320 }
475} 321}
@@ -517,9 +363,7 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
517 * modifiy original string, 363 * modifiy original string,
518 * returns length of resulting string 364 * returns length of resulting string
519 */ 365 */
520static int 366static int sclp_switch_cases(unsigned char *buf, int count)
521sclp_switch_cases(unsigned char *buf, int count,
522 unsigned char delim, int tolower)
523{ 367{
524 unsigned char *ip, *op; 368 unsigned char *ip, *op;
525 int toggle; 369 int toggle;
@@ -529,9 +373,9 @@ sclp_switch_cases(unsigned char *buf, int count,
529 ip = op = buf; 373 ip = op = buf;
530 while (count-- > 0) { 374 while (count-- > 0) {
531 /* compare with special character */ 375 /* compare with special character */
532 if (*ip == delim) { 376 if (*ip == CASE_DELIMITER) {
533 /* followed by another special character? */ 377 /* followed by another special character? */
534 if (count && ip[1] == delim) { 378 if (count && ip[1] == CASE_DELIMITER) {
535 /* 379 /*
536 * ... then put a single copy of the special 380 * ... then put a single copy of the special
537 * character to the output string 381 * character to the output string
@@ -550,7 +394,7 @@ sclp_switch_cases(unsigned char *buf, int count,
550 /* not the special character */ 394 /* not the special character */
551 if (toggle) 395 if (toggle)
552 /* but case switching is on */ 396 /* but case switching is on */
553 if (tolower) 397 if (sclp_tty_tolower)
554 /* switch to uppercase */ 398 /* switch to uppercase */
555 *op++ = _ebc_toupper[(int) *ip++]; 399 *op++ = _ebc_toupper[(int) *ip++];
556 else 400 else
@@ -570,30 +414,12 @@ sclp_get_input(unsigned char *start, unsigned char *end)
570 int count; 414 int count;
571 415
572 count = end - start; 416 count = end - start;
573 /* 417 if (sclp_tty_tolower)
574 * if set in ioctl convert EBCDIC to lower case
575 * (modify original input in SCCB)
576 */
577 if (sclp_ioctls.tolower)
578 EBC_TOLOWER(start, count); 418 EBC_TOLOWER(start, count);
579 419 count = sclp_switch_cases(start, count);
580 /*
581 * if set in ioctl find out characters in lower or upper case
582 * (depends on current case) separated by a special character,
583 * works on EBCDIC
584 */
585 if (sclp_ioctls.delim)
586 count = sclp_switch_cases(start, count,
587 sclp_ioctls.delim,
588 sclp_ioctls.tolower);
589
590 /* convert EBCDIC to ASCII (modify original input in SCCB) */ 420 /* convert EBCDIC to ASCII (modify original input in SCCB) */
591 sclp_ebcasc_str(start, count); 421 sclp_ebcasc_str(start, count);
592 422
593 /* if set in ioctl write operators input to console */
594 if (sclp_ioctls.echo)
595 sclp_tty_write(sclp_tty, start, count);
596
597 /* transfer input to high level driver */ 423 /* transfer input to high level driver */
598 sclp_tty_input(start, count); 424 sclp_tty_input(start, count);
599} 425}
@@ -717,7 +543,6 @@ static const struct tty_operations sclp_ops = {
717 .write_room = sclp_tty_write_room, 543 .write_room = sclp_tty_write_room,
718 .chars_in_buffer = sclp_tty_chars_in_buffer, 544 .chars_in_buffer = sclp_tty_chars_in_buffer,
719 .flush_buffer = sclp_tty_flush_buffer, 545 .flush_buffer = sclp_tty_flush_buffer,
720 .ioctl = sclp_tty_ioctl,
721}; 546};
722 547
723static int __init 548static int __init
@@ -736,9 +561,6 @@ sclp_tty_init(void)
736 561
737 rc = sclp_rw_init(); 562 rc = sclp_rw_init();
738 if (rc) { 563 if (rc) {
739 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
740 "could not register tty - "
741 "sclp_rw_init returned %d\n", rc);
742 put_tty_driver(driver); 564 put_tty_driver(driver);
743 return rc; 565 return rc;
744 } 566 }
@@ -754,7 +576,6 @@ sclp_tty_init(void)
754 } 576 }
755 INIT_LIST_HEAD(&sclp_tty_outqueue); 577 INIT_LIST_HEAD(&sclp_tty_outqueue);
756 spin_lock_init(&sclp_tty_lock); 578 spin_lock_init(&sclp_tty_lock);
757 init_waitqueue_head(&sclp_tty_waitq);
758 init_timer(&sclp_tty_timer); 579 init_timer(&sclp_tty_timer);
759 sclp_ttybuf = NULL; 580 sclp_ttybuf = NULL;
760 sclp_tty_buffer_count = 0; 581 sclp_tty_buffer_count = 0;
@@ -763,11 +584,10 @@ sclp_tty_init(void)
763 * save 4 characters for the CPU number 584 * save 4 characters for the CPU number
764 * written at start of each line by VM/CP 585 * written at start of each line by VM/CP
765 */ 586 */
766 sclp_ioctls_init.columns = 76; 587 sclp_tty_columns = 76;
767 /* case input lines to lowercase */ 588 /* case input lines to lowercase */
768 sclp_ioctls_init.tolower = 1; 589 sclp_tty_tolower = 1;
769 } 590 }
770 sclp_ioctls = sclp_ioctls_init;
771 sclp_tty_chars_count = 0; 591 sclp_tty_chars_count = 0;
772 sclp_tty = NULL; 592 sclp_tty = NULL;
773 593
@@ -792,9 +612,6 @@ sclp_tty_init(void)
792 tty_set_operations(driver, &sclp_ops); 612 tty_set_operations(driver, &sclp_ops);
793 rc = tty_register_driver(driver); 613 rc = tty_register_driver(driver);
794 if (rc) { 614 if (rc) {
795 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
796 "could not register tty - "
797 "tty_register_driver returned %d\n", rc);
798 put_tty_driver(driver); 615 put_tty_driver(driver);
799 return rc; 616 return rc;
800 } 617 }
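
Note on the sclp_tty hunks above: put_char(), flush_chars() and flush_buffer() all share one pattern — single characters are collected in a small buffer and handed to sclp_tty_write_string() as a chunk when a newline arrives or the buffer fills. A minimal, self-contained sketch of that pattern in plain C (illustrative names only, not the driver's code):

#include <stdio.h>

#define BUF_SIZE 256

static char chars[BUF_SIZE];
static int chars_count;

/* stand-in for sclp_tty_write_string(): emit one chunk */
static void write_string(const char *buf, int count)
{
	fwrite(buf, 1, (size_t) count, stdout);
}

/* collect single characters, flush on '\n' or when the buffer is full */
static int put_char(char ch)
{
	chars[chars_count++] = ch;
	if (ch == '\n' || chars_count >= BUF_SIZE) {
		write_string(chars, chars_count);
		chars_count = 0;
	}
	return 1;
}

/* flush whatever is still buffered */
static void flush_chars(void)
{
	if (chars_count > 0) {
		write_string(chars, chars_count);
		chars_count = 0;
	}
}

int main(void)
{
	const char *msg = "hello\nworld";
	const char *p;

	for (p = msg; *p; p++)
		put_char(*p);
	flush_chars();
	return 0;
}
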
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
index 0ce2c1fc5340..4b965b22fecd 100644
--- a/drivers/s390/char/sclp_tty.h
+++ b/drivers/s390/char/sclp_tty.h
@@ -11,61 +11,8 @@
11#ifndef __SCLP_TTY_H__ 11#ifndef __SCLP_TTY_H__
12#define __SCLP_TTY_H__ 12#define __SCLP_TTY_H__
13 13
14#include <linux/ioctl.h>
15#include <linux/termios.h>
16#include <linux/tty_driver.h> 14#include <linux/tty_driver.h>
17 15
18/* This is the type of data structures storing sclp ioctl setting. */
19struct sclp_ioctls {
20 unsigned short htab;
21 unsigned char echo;
22 unsigned short columns;
23 unsigned char final_nl;
24 unsigned short max_sccb;
25 unsigned short kmem_sccb; /* can't be modified at run time */
26 unsigned char tolower;
27 unsigned char delim;
28};
29
30/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
31#define SCLP_IOCTL_LETTER 'B'
32
33/* set width of horizontal tabulator */
34#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
35/* enable/disable echo of input (independent from line discipline) */
36#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
37/* set number of colums for output */
38#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
39/* enable/disable writing without final new line character */
40#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char)
41/* set the maximum buffers size for output, rounded up to next 4kB boundary */
42#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
43/* set initial (default) sclp ioctls */
44#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6)
45/* enable/disable conversion from upper to lower case of input */
46#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
47/* set special character used for separating upper and lower case, */
48/* 0x00 disables this feature */
49#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char)
50
51/* get width of horizontal tabulator */
52#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
53/* Is echo of input enabled ? (independent from line discipline) */
54#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
55/* get number of colums for output */
56#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
57/* Is writing without final new line character enabled ? */
58#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char)
59/* get the maximum buffers size for output */
60#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
61/* Is conversion from upper to lower case of input enabled ? */
62#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
63/* get special character used for separating upper and lower case, */
64/* 0x00 disables this feature */
65#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
66/* get the number of buffers/pages got from kernel at startup */
67#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short)
68
69extern struct tty_driver *sclp_tty_driver; 16extern struct tty_driver *sclp_tty_driver;
70 17
71#endif /* __SCLP_TTY_H__ */ 18#endif /* __SCLP_TTY_H__ */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 3e577f655b18..ad51738c4261 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -27,7 +27,6 @@
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28#include "sclp.h" 28#include "sclp.h"
29 29
30#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
31#define SCLP_VT220_MAJOR TTY_MAJOR 30#define SCLP_VT220_MAJOR TTY_MAJOR
32#define SCLP_VT220_MINOR 65 31#define SCLP_VT220_MINOR 65
33#define SCLP_VT220_DRIVER_NAME "sclp_vt220" 32#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
@@ -82,8 +81,8 @@ static struct sclp_vt220_request *sclp_vt220_current_request;
82/* Number of characters in current request buffer */ 81/* Number of characters in current request buffer */
83static int sclp_vt220_buffered_chars; 82static int sclp_vt220_buffered_chars;
84 83
85/* Flag indicating whether this driver has already been initialized */ 84/* Counter controlling core driver initialization. */
86static int sclp_vt220_initialized = 0; 85static int __initdata sclp_vt220_init_count;
87 86
88/* Flag indicating that sclp_vt220_current_request should really 87/* Flag indicating that sclp_vt220_current_request should really
89 * have been already queued but wasn't because the SCLP was processing 88 * have been already queued but wasn't because the SCLP was processing
@@ -609,10 +608,8 @@ sclp_vt220_flush_buffer(struct tty_struct *tty)
609 sclp_vt220_emit_current(); 608 sclp_vt220_emit_current();
610} 609}
611 610
612/* 611/* Release allocated pages. */
613 * Initialize all relevant components and register driver with system. 612static void __init __sclp_vt220_free_pages(void)
614 */
615static void __init __sclp_vt220_cleanup(void)
616{ 613{
617 struct list_head *page, *p; 614 struct list_head *page, *p;
618 615
@@ -623,21 +620,30 @@ static void __init __sclp_vt220_cleanup(void)
623 else 620 else
624 free_bootmem((unsigned long) page, PAGE_SIZE); 621 free_bootmem((unsigned long) page, PAGE_SIZE);
625 } 622 }
626 if (!list_empty(&sclp_vt220_register.list))
627 sclp_unregister(&sclp_vt220_register);
628 sclp_vt220_initialized = 0;
629} 623}
630 624
631static int __init __sclp_vt220_init(void) 625/* Release memory and unregister from sclp core. Controlled by init counting -
626 * only the last invoker will actually perform these actions. */
627static void __init __sclp_vt220_cleanup(void)
628{
629 sclp_vt220_init_count--;
630 if (sclp_vt220_init_count != 0)
631 return;
632 sclp_unregister(&sclp_vt220_register);
633 __sclp_vt220_free_pages();
634}
635
636/* Allocate buffer pages and register with sclp core. Controlled by init
637 * counting - only the first invoker will actually perform these actions. */
638static int __init __sclp_vt220_init(int num_pages)
632{ 639{
633 void *page; 640 void *page;
634 int i; 641 int i;
635 int num_pages;
636 int rc; 642 int rc;
637 643
638 if (sclp_vt220_initialized) 644 sclp_vt220_init_count++;
645 if (sclp_vt220_init_count != 1)
639 return 0; 646 return 0;
640 sclp_vt220_initialized = 1;
641 spin_lock_init(&sclp_vt220_lock); 647 spin_lock_init(&sclp_vt220_lock);
642 INIT_LIST_HEAD(&sclp_vt220_empty); 648 INIT_LIST_HEAD(&sclp_vt220_empty);
643 INIT_LIST_HEAD(&sclp_vt220_outqueue); 649 INIT_LIST_HEAD(&sclp_vt220_outqueue);
@@ -649,24 +655,22 @@ static int __init __sclp_vt220_init(void)
649 sclp_vt220_flush_later = 0; 655 sclp_vt220_flush_later = 0;
650 656
651 /* Allocate pages for output buffering */ 657 /* Allocate pages for output buffering */
652 num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES;
653 for (i = 0; i < num_pages; i++) { 658 for (i = 0; i < num_pages; i++) {
654 if (slab_is_available()) 659 if (slab_is_available())
655 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 660 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
656 else 661 else
657 page = alloc_bootmem_low_pages(PAGE_SIZE); 662 page = alloc_bootmem_low_pages(PAGE_SIZE);
658 if (!page) { 663 if (!page) {
659 __sclp_vt220_cleanup(); 664 rc = -ENOMEM;
660 return -ENOMEM; 665 goto out;
661 } 666 }
662 list_add_tail((struct list_head *) page, &sclp_vt220_empty); 667 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
663 } 668 }
664 rc = sclp_register(&sclp_vt220_register); 669 rc = sclp_register(&sclp_vt220_register);
670out:
665 if (rc) { 671 if (rc) {
666 printk(KERN_ERR SCLP_VT220_PRINT_HEADER 672 __sclp_vt220_free_pages();
667 "could not register vt220 - " 673 sclp_vt220_init_count--;
668 "sclp_register returned %d\n", rc);
669 __sclp_vt220_cleanup();
670 } 674 }
671 return rc; 675 return rc;
672} 676}
@@ -689,15 +693,13 @@ static int __init sclp_vt220_tty_init(void)
689{ 693{
690 struct tty_driver *driver; 694 struct tty_driver *driver;
691 int rc; 695 int rc;
692 int cleanup;
693 696
694 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve 697 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
695 * symmetry between VM and LPAR systems regarding ttyS1. */ 698 * symmetry between VM and LPAR systems regarding ttyS1. */
696 driver = alloc_tty_driver(1); 699 driver = alloc_tty_driver(1);
697 if (!driver) 700 if (!driver)
698 return -ENOMEM; 701 return -ENOMEM;
699 cleanup = !sclp_vt220_initialized; 702 rc = __sclp_vt220_init(MAX_KMEM_PAGES);
700 rc = __sclp_vt220_init();
701 if (rc) 703 if (rc)
702 goto out_driver; 704 goto out_driver;
703 705
@@ -713,18 +715,13 @@ static int __init sclp_vt220_tty_init(void)
713 tty_set_operations(driver, &sclp_vt220_ops); 715 tty_set_operations(driver, &sclp_vt220_ops);
714 716
715 rc = tty_register_driver(driver); 717 rc = tty_register_driver(driver);
716 if (rc) { 718 if (rc)
717 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
718 "could not register tty - "
719 "tty_register_driver returned %d\n", rc);
720 goto out_init; 719 goto out_init;
721 }
722 sclp_vt220_driver = driver; 720 sclp_vt220_driver = driver;
723 return 0; 721 return 0;
724 722
725out_init: 723out_init:
726 if (cleanup) 724 __sclp_vt220_cleanup();
727 __sclp_vt220_cleanup();
728out_driver: 725out_driver:
729 put_tty_driver(driver); 726 put_tty_driver(driver);
730 return rc; 727 return rc;
@@ -773,10 +770,9 @@ sclp_vt220_con_init(void)
773{ 770{
774 int rc; 771 int rc;
775 772
776 INIT_LIST_HEAD(&sclp_vt220_register.list);
777 if (!CONSOLE_IS_SCLP) 773 if (!CONSOLE_IS_SCLP)
778 return 0; 774 return 0;
779 rc = __sclp_vt220_init(); 775 rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
780 if (rc) 776 if (rc)
781 return rc; 777 return rc;
782 /* Attach linux console */ 778 /* Attach linux console */
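
The __sclp_vt220_init()/__sclp_vt220_cleanup() rework above replaces the old "initialized" flag with a call counter: only the first init call allocates pages and registers with the sclp core, and only the last cleanup call undoes it, so the console path and the tty path can share the same resources. A compact, self-contained sketch of the idea with hypothetical names:

#include <stdio.h>

static int init_count;

static int do_real_setup(void)      /* hypothetical: allocate, register */
{
	puts("setup");
	return 0;
}

static void do_real_teardown(void)  /* hypothetical: unregister, free */
{
	puts("teardown");
}

/* first caller performs the real setup; later callers just take a reference */
static int shared_init(void)
{
	int rc;

	init_count++;
	if (init_count != 1)
		return 0;
	rc = do_real_setup();
	if (rc)
		init_count--;	/* setup failed: drop the reference again */
	return rc;
}

/* last caller performs the real teardown */
static void shared_cleanup(void)
{
	init_count--;
	if (init_count != 0)
		return;
	do_real_teardown();
}

int main(void)
{
	shared_init();		/* e.g. console init path */
	shared_init();		/* e.g. tty init path */
	shared_cleanup();	/* nothing happens yet */
	shared_cleanup();	/* last reference: real teardown */
	return 0;
}
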
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 874adf365e46..22ca34361ed7 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -196,7 +196,7 @@ tape_34xx_erp_retry(struct tape_request *request)
196static int 196static int
197tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) 197tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
198{ 198{
199 if (irb->scsw.dstat == 0x85 /* READY */) { 199 if (irb->scsw.cmd.dstat == 0x85) { /* READY */
200 /* A medium was inserted in the drive. */ 200 /* A medium was inserted in the drive. */
201 DBF_EVENT(6, "xuud med\n"); 201 DBF_EVENT(6, "xuud med\n");
202 tape_34xx_delete_sbid_from(device, 0); 202 tape_34xx_delete_sbid_from(device, 0);
@@ -844,22 +844,22 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request,
844 if (request == NULL) 844 if (request == NULL)
845 return tape_34xx_unsolicited_irq(device, irb); 845 return tape_34xx_unsolicited_irq(device, irb);
846 846
847 if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && 847 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
848 (irb->scsw.dstat & DEV_STAT_DEV_END) && 848 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
849 (request->op == TO_WRI)) { 849 (request->op == TO_WRI)) {
850 /* Write at end of volume */ 850 /* Write at end of volume */
851 PRINT_INFO("End of volume\n"); /* XXX */ 851 PRINT_INFO("End of volume\n"); /* XXX */
852 return tape_34xx_erp_failed(request, -ENOSPC); 852 return tape_34xx_erp_failed(request, -ENOSPC);
853 } 853 }
854 854
855 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 855 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
856 return tape_34xx_unit_check(device, request, irb); 856 return tape_34xx_unit_check(device, request, irb);
857 857
858 if (irb->scsw.dstat & DEV_STAT_DEV_END) { 858 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
859 /* 859 /*
860 * A unit exception occurs on skipping over a tapemark block. 860 * A unit exception occurs on skipping over a tapemark block.
861 */ 861 */
862 if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { 862 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
863 if (request->op == TO_BSB || request->op == TO_FSB) 863 if (request->op == TO_BSB || request->op == TO_FSB)
864 request->rescnt++; 864 request->rescnt++;
865 else 865 else
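
The irb->scsw.dstat to irb->scsw.cmd.dstat changes in this and the following tape, tty3270 and vmur hunks reflect a subchannel status word that is now a union of command-mode and transport-mode layouts; the fields these drivers test live in the command-mode member. A simplified, purely illustrative sketch of that kind of layout change (not the real cio definitions):

#include <stdio.h>

/* command-mode view of the status word (illustrative only) */
struct cmd_status {
	unsigned char dstat;	/* device status */
	unsigned char cstat;	/* subchannel status */
};

/* a transport-mode view would have a different layout */
struct tm_status {
	unsigned short fcxs;
};

/* the status word becomes a union of the two formats */
union status_word {
	struct cmd_status cmd;
	struct tm_status tm;
};

struct irb_like {
	union status_word scsw;
};

int main(void)
{
	struct irb_like irb = { .scsw.cmd = { .dstat = 0x0c, .cstat = 0 } };

	/* old style was irb.scsw.dstat; with the union it becomes: */
	printf("dstat=%02x\n", irb.scsw.cmd.dstat);
	return 0;
}
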
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 42ce7915fc5d..839987618ffd 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -837,13 +837,13 @@ tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
837static int 837static int
838tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) 838tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
839{ 839{
840 if (irb->scsw.dstat == DEV_STAT_CHN_END) 840 if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
841 /* Probably result of halt ssch */ 841 /* Probably result of halt ssch */
842 return TAPE_IO_PENDING; 842 return TAPE_IO_PENDING;
843 else if (irb->scsw.dstat == 0x85) 843 else if (irb->scsw.cmd.dstat == 0x85)
844 /* Device Ready */ 844 /* Device Ready */
845 DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); 845 DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
846 else if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 846 else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
847 tape_3590_schedule_work(device, TO_READ_ATTMSG); 847 tape_3590_schedule_work(device, TO_READ_ATTMSG);
848 } else { 848 } else {
849 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); 849 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
@@ -1515,18 +1515,19 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
1515 if (request == NULL) 1515 if (request == NULL)
1516 return tape_3590_unsolicited_irq(device, irb); 1516 return tape_3590_unsolicited_irq(device, irb);
1517 1517
1518 if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && 1518 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
1519 (irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { 1519 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
1520 (request->op == TO_WRI)) {
1520 /* Write at end of volume */ 1521 /* Write at end of volume */
1521 DBF_EVENT(2, "End of volume\n"); 1522 DBF_EVENT(2, "End of volume\n");
1522 return tape_3590_erp_failed(device, request, irb, -ENOSPC); 1523 return tape_3590_erp_failed(device, request, irb, -ENOSPC);
1523 } 1524 }
1524 1525
1525 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 1526 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
1526 return tape_3590_unit_check(device, request, irb); 1527 return tape_3590_unit_check(device, request, irb);
1527 1528
1528 if (irb->scsw.dstat & DEV_STAT_DEV_END) { 1529 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
1529 if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) { 1530 if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
1530 if (request->op == TO_FSB || request->op == TO_BSB) 1531 if (request->op == TO_FSB || request->op == TO_BSB)
1531 request->rescnt++; 1532 request->rescnt++;
1532 else 1533 else
@@ -1536,12 +1537,12 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
1536 return tape_3590_done(device, request); 1537 return tape_3590_done(device, request);
1537 } 1538 }
1538 1539
1539 if (irb->scsw.dstat & DEV_STAT_CHN_END) { 1540 if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
1540 DBF_EVENT(2, "cannel end\n"); 1541 DBF_EVENT(2, "cannel end\n");
1541 return TAPE_IO_PENDING; 1542 return TAPE_IO_PENDING;
1542 } 1543 }
1543 1544
1544 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 1545 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
1545 DBF_EVENT(2, "Unit Attention when busy..\n"); 1546 DBF_EVENT(2, "Unit Attention when busy..\n");
1546 return TAPE_IO_PENDING; 1547 return TAPE_IO_PENDING;
1547 } 1548 }
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index ebe84067bae9..687720b552d1 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/mtio.h> 16#include <linux/mtio.h>
17#include <linux/smp_lock.h>
17 18
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
19 20
@@ -289,21 +290,26 @@ tapechar_open (struct inode *inode, struct file *filp)
289 if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) 290 if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
290 return -ENODEV; 291 return -ENODEV;
291 292
293 lock_kernel();
292 minor = iminor(filp->f_path.dentry->d_inode); 294 minor = iminor(filp->f_path.dentry->d_inode);
293 device = tape_get_device(minor / TAPE_MINORS_PER_DEV); 295 device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
294 if (IS_ERR(device)) { 296 if (IS_ERR(device)) {
295 DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n"); 297 DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
296 return PTR_ERR(device); 298 rc = PTR_ERR(device);
299 goto out;
297 } 300 }
298 301
299 302
300 rc = tape_open(device); 303 rc = tape_open(device);
301 if (rc == 0) { 304 if (rc == 0) {
302 filp->private_data = device; 305 filp->private_data = device;
303 return nonseekable_open(inode, filp); 306 rc = nonseekable_open(inode, filp);
304 } 307 }
305 tape_put_device(device); 308 else
309 tape_put_device(device);
306 310
311out:
312 unlock_kernel();
307 return rc; 313 return rc;
308} 314}
309 315
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index c20e3c548343..181a5441af16 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -839,7 +839,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request,
839 839
840 PRINT_INFO("-------------------------------------------------\n"); 840 PRINT_INFO("-------------------------------------------------\n");
841 PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", 841 PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
842 irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa); 842 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa);
843 PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id); 843 PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
844 if (request != NULL) 844 if (request != NULL)
845 PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); 845 PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
@@ -867,7 +867,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
867 else 867 else
868 op = "---"; 868 op = "---";
869 DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", 869 DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
870 irb->scsw.dstat,irb->scsw.cstat); 870 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
871 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); 871 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
872 sptr = (unsigned int *) irb->ecw; 872 sptr = (unsigned int *) irb->ecw;
873 DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); 873 DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
@@ -1083,10 +1083,11 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1083 * error might still apply. So we just schedule the request to be 1083 * error might still apply. So we just schedule the request to be
1084 * started later. 1084 * started later.
1085 */ 1085 */
1086 if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && 1086 if (irb->scsw.cmd.cc != 0 &&
1087 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
1087 (request->status == TAPE_REQUEST_IN_IO)) { 1088 (request->status == TAPE_REQUEST_IN_IO)) {
1088 DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", 1089 DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
1089 device->cdev_id, irb->scsw.cc, irb->scsw.fctl); 1090 device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
1090 request->status = TAPE_REQUEST_QUEUED; 1091 request->status = TAPE_REQUEST_QUEUED;
1091 schedule_delayed_work(&device->tape_dnr, HZ); 1092 schedule_delayed_work(&device->tape_dnr, HZ);
1092 return; 1093 return;
@@ -1094,8 +1095,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1094 1095
1095 /* May be an unsolicited irq */ 1096 /* May be an unsolicited irq */
1096 if(request != NULL) 1097 if(request != NULL)
1097 request->rescnt = irb->scsw.count; 1098 request->rescnt = irb->scsw.cmd.count;
1098 else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && 1099 else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
1099 !list_empty(&device->req_queue)) { 1100 !list_empty(&device->req_queue)) {
1100 /* Not Ready to Ready after long busy ? */ 1101 /* Not Ready to Ready after long busy ? */
1101 struct tape_request *req; 1102 struct tape_request *req;
@@ -1111,7 +1112,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1111 return; 1112 return;
1112 } 1113 }
1113 } 1114 }
1114 if (irb->scsw.dstat != 0x0c) { 1115 if (irb->scsw.cmd.dstat != 0x0c) {
1115 /* Set the 'ONLINE' flag depending on sense byte 1 */ 1116 /* Set the 'ONLINE' flag depending on sense byte 1 */
1116 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) 1117 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
1117 device->tape_generic_status |= GMT_ONLINE(~0); 1118 device->tape_generic_status |= GMT_ONLINE(~0);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 5043150019ac..a7fe6302c982 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -663,7 +663,7 @@ static int
663tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) 663tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
664{ 664{
665 /* Handle ATTN. Schedule tasklet to read aid. */ 665 /* Handle ATTN. Schedule tasklet to read aid. */
666 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 666 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
667 if (!tp->throttle) 667 if (!tp->throttle)
668 tty3270_issue_read(tp, 0); 668 tty3270_issue_read(tp, 0);
669 else 669 else
@@ -671,11 +671,11 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
671 } 671 }
672 672
673 if (rq) { 673 if (rq) {
674 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 674 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
675 rq->rc = -EIO; 675 rq->rc = -EIO;
676 else 676 else
677 /* Normal end. Copy residual count. */ 677 /* Normal end. Copy residual count. */
678 rq->rescnt = irb->scsw.count; 678 rq->rescnt = irb->scsw.cmd.count;
679 } 679 }
680 return RAW3270_IO_DONE; 680 return RAW3270_IO_DONE;
681} 681}
@@ -1792,15 +1792,12 @@ static int __init tty3270_init(void)
1792 tty_set_operations(driver, &tty3270_ops); 1792 tty_set_operations(driver, &tty3270_ops);
1793 ret = tty_register_driver(driver); 1793 ret = tty_register_driver(driver);
1794 if (ret) { 1794 if (ret) {
1795 printk(KERN_ERR "tty3270 registration failed with %d\n", ret);
1796 put_tty_driver(driver); 1795 put_tty_driver(driver);
1797 return ret; 1796 return ret;
1798 } 1797 }
1799 tty3270_driver = driver; 1798 tty3270_driver = driver;
1800 ret = raw3270_register_notifier(tty3270_notifier); 1799 ret = raw3270_register_notifier(tty3270_notifier);
1801 if (ret) { 1800 if (ret) {
1802 printk(KERN_ERR "tty3270 notifier registration failed "
1803 "with %d\n", ret);
1804 put_tty_driver(driver); 1801 put_tty_driver(driver);
1805 return ret; 1802 return ret;
1806 1803
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 2f419b0ea628..09e7d9bf438b 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -16,6 +16,7 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/smp_lock.h>
19#include <asm/cpcmd.h> 20#include <asm/cpcmd.h>
20#include <asm/debug.h> 21#include <asm/debug.h>
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
@@ -39,11 +40,14 @@ static int vmcp_open(struct inode *inode, struct file *file)
39 session = kmalloc(sizeof(*session), GFP_KERNEL); 40 session = kmalloc(sizeof(*session), GFP_KERNEL);
40 if (!session) 41 if (!session)
41 return -ENOMEM; 42 return -ENOMEM;
43
44 lock_kernel();
42 session->bufsize = PAGE_SIZE; 45 session->bufsize = PAGE_SIZE;
43 session->response = NULL; 46 session->response = NULL;
44 session->resp_size = 0; 47 session->resp_size = 0;
45 mutex_init(&session->mutex); 48 mutex_init(&session->mutex);
46 file->private_data = session; 49 file->private_data = session;
50 unlock_kernel();
47 return nonseekable_open(inode, file); 51 return nonseekable_open(inode, file);
48} 52}
49 53
@@ -61,30 +65,24 @@ static int vmcp_release(struct inode *inode, struct file *file)
61static ssize_t 65static ssize_t
62vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) 66vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
63{ 67{
64 size_t tocopy; 68 ssize_t ret;
69 size_t size;
65 struct vmcp_session *session; 70 struct vmcp_session *session;
66 71
67 session = (struct vmcp_session *)file->private_data; 72 session = file->private_data;
68 if (mutex_lock_interruptible(&session->mutex)) 73 if (mutex_lock_interruptible(&session->mutex))
69 return -ERESTARTSYS; 74 return -ERESTARTSYS;
70 if (!session->response) { 75 if (!session->response) {
71 mutex_unlock(&session->mutex); 76 mutex_unlock(&session->mutex);
72 return 0; 77 return 0;
73 } 78 }
74 if (*ppos > session->resp_size) { 79 size = min_t(size_t, session->resp_size, session->bufsize);
75 mutex_unlock(&session->mutex); 80 ret = simple_read_from_buffer(buff, count, ppos,
76 return 0; 81 session->response, size);
77 }
78 tocopy = min(session->resp_size - (size_t) (*ppos), count);
79 tocopy = min(tocopy, session->bufsize - (size_t) (*ppos));
80 82
81 if (copy_to_user(buff, session->response + (*ppos), tocopy)) {
82 mutex_unlock(&session->mutex);
83 return -EFAULT;
84 }
85 mutex_unlock(&session->mutex); 83 mutex_unlock(&session->mutex);
86 *ppos += tocopy; 84
87 return tocopy; 85 return ret;
88} 86}
89 87
90static ssize_t 88static ssize_t
@@ -198,27 +196,23 @@ static int __init vmcp_init(void)
198 PRINT_WARN("z/VM CP interface is only available under z/VM\n"); 196 PRINT_WARN("z/VM CP interface is only available under z/VM\n");
199 return -ENODEV; 197 return -ENODEV;
200 } 198 }
199
201 vmcp_debug = debug_register("vmcp", 1, 1, 240); 200 vmcp_debug = debug_register("vmcp", 1, 1, 240);
202 if (!vmcp_debug) { 201 if (!vmcp_debug)
203 PRINT_ERR("z/VM CP interface not loaded. Could not register "
204 "debug feature\n");
205 return -ENOMEM; 202 return -ENOMEM;
206 } 203
207 ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); 204 ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
208 if (ret) { 205 if (ret) {
209 PRINT_ERR("z/VM CP interface not loaded. Could not register "
210 "debug feature view. Error code: %d\n", ret);
211 debug_unregister(vmcp_debug); 206 debug_unregister(vmcp_debug);
212 return ret; 207 return ret;
213 } 208 }
209
214 ret = misc_register(&vmcp_dev); 210 ret = misc_register(&vmcp_dev);
215 if (ret) { 211 if (ret) {
216 PRINT_ERR("z/VM CP interface not loaded. Could not register "
217 "misc device. Error code: %d\n", ret);
218 debug_unregister(vmcp_debug); 212 debug_unregister(vmcp_debug);
219 return ret; 213 return ret;
220 } 214 }
221 PRINT_INFO("z/VM CP interface loaded\n"); 215
222 return 0; 216 return 0;
223} 217}
224 218
@@ -226,7 +220,6 @@ static void __exit vmcp_exit(void)
226{ 220{
227 misc_deregister(&vmcp_dev); 221 misc_deregister(&vmcp_dev);
228 debug_unregister(vmcp_debug); 222 debug_unregister(vmcp_debug);
229 PRINT_INFO("z/VM CP interface unloaded.\n");
230} 223}
231 224
232module_init(vmcp_init); 225module_init(vmcp_init);
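
The vmcp_read() hunk above drops the hand-rolled *ppos/copy_to_user() arithmetic in favour of simple_read_from_buffer(), which clamps the copy to the data actually available and advances *ppos itself. A minimal kernel-style sketch of a read() method using it; the surrounding session structure is hypothetical and simplified:

#include <linux/fs.h>
#include <linux/uaccess.h>

/* hypothetical per-open state, standing in for struct vmcp_session */
struct demo_session {
	char *response;
	size_t resp_size;
};

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct demo_session *s = file->private_data;

	if (!s->response)
		return 0;
	/* copies at most resp_size - *ppos bytes and updates *ppos */
	return simple_read_from_buffer(buf, count, ppos,
				       s->response, s->resp_size);
}
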
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 2c2428cc05d8..c31faefa2b3b 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -25,6 +25,7 @@
25#include <linux/kmod.h> 25#include <linux/kmod.h>
26#include <linux/cdev.h> 26#include <linux/cdev.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/smp_lock.h>
28#include <linux/string.h> 29#include <linux/string.h>
29 30
30 31
@@ -216,9 +217,7 @@ static int vmlogrdr_get_recording_class_AB(void)
216 char *tail; 217 char *tail;
217 int len,i; 218 int len,i;
218 219
219 printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command);
220 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 220 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
221 printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response);
222 len = strnlen(cp_response,sizeof(cp_response)); 221 len = strnlen(cp_response,sizeof(cp_response));
223 // now the parsing 222 // now the parsing
224 tail=strnchr(cp_response,len,'='); 223 tail=strnchr(cp_response,len,'=');
@@ -268,11 +267,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
268 logptr->recording_name, 267 logptr->recording_name,
269 qid_string); 268 qid_string);
270 269
271 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n",
272 cp_command);
273 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 270 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
274 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
275 cp_response);
276 } 271 }
277 272
278 memset(cp_command, 0x00, sizeof(cp_command)); 273 memset(cp_command, 0x00, sizeof(cp_command));
@@ -282,10 +277,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
282 onoff, 277 onoff,
283 qid_string); 278 qid_string);
284 279
285 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
286 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 280 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
287 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
288 cp_response);
289 /* The recording command will usually answer with 'Command complete' 281 /* The recording command will usually answer with 'Command complete'
290 * on success, but when the specific service was never connected 282 * on success, but when the specific service was never connected
291 * before then there might be an additional informational message 283 * before then there might be an additional informational message
@@ -319,9 +311,11 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
319 return -ENOSYS; 311 return -ENOSYS;
320 312
321 /* Be sure this device hasn't already been opened */ 313 /* Be sure this device hasn't already been opened */
314 lock_kernel();
322 spin_lock_bh(&logptr->priv_lock); 315 spin_lock_bh(&logptr->priv_lock);
323 if (logptr->dev_in_use) { 316 if (logptr->dev_in_use) {
324 spin_unlock_bh(&logptr->priv_lock); 317 spin_unlock_bh(&logptr->priv_lock);
318 unlock_kernel();
325 return -EBUSY; 319 return -EBUSY;
326 } 320 }
327 logptr->dev_in_use = 1; 321 logptr->dev_in_use = 1;
@@ -365,7 +359,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
365 || (logptr->iucv_path_severed)); 359 || (logptr->iucv_path_severed));
366 if (logptr->iucv_path_severed) 360 if (logptr->iucv_path_severed)
367 goto out_record; 361 goto out_record;
368 return nonseekable_open(inode, filp); 362 ret = nonseekable_open(inode, filp);
363 unlock_kernel();
364 return ret;
369 365
370out_record: 366out_record:
371 if (logptr->autorecording) 367 if (logptr->autorecording)
@@ -375,6 +371,7 @@ out_path:
375 logptr->path = NULL; 371 logptr->path = NULL;
376out_dev: 372out_dev:
377 logptr->dev_in_use = 0; 373 logptr->dev_in_use = 0;
374 unlock_kernel();
378 return -EIO; 375 return -EIO;
379} 376}
380 377
@@ -567,10 +564,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
567 "RECORDING %s PURGE ", 564 "RECORDING %s PURGE ",
568 priv->recording_name); 565 priv->recording_name);
569 566
570 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
571 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 567 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
572 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
573 cp_response);
574 568
575 return count; 569 return count;
576} 570}
@@ -682,28 +676,20 @@ static int vmlogrdr_register_driver(void)
682 676
683 /* Register with iucv driver */ 677 /* Register with iucv driver */
684 ret = iucv_register(&vmlogrdr_iucv_handler, 1); 678 ret = iucv_register(&vmlogrdr_iucv_handler, 1);
685 if (ret) { 679 if (ret)
686 printk (KERN_ERR "vmlogrdr: failed to register with "
687 "iucv driver\n");
688 goto out; 680 goto out;
689 }
690 681
691 ret = driver_register(&vmlogrdr_driver); 682 ret = driver_register(&vmlogrdr_driver);
692 if (ret) { 683 if (ret)
693 printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
694 goto out_iucv; 684 goto out_iucv;
695 }
696 685
697 ret = driver_create_file(&vmlogrdr_driver, 686 ret = driver_create_file(&vmlogrdr_driver,
698 &driver_attr_recording_status); 687 &driver_attr_recording_status);
699 if (ret) { 688 if (ret)
700 printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
701 goto out_driver; 689 goto out_driver;
702 }
703 690
704 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); 691 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
705 if (IS_ERR(vmlogrdr_class)) { 692 if (IS_ERR(vmlogrdr_class)) {
706 printk(KERN_ERR "vmlogrdr: failed to create class.\n");
707 ret = PTR_ERR(vmlogrdr_class); 693 ret = PTR_ERR(vmlogrdr_class);
708 vmlogrdr_class = NULL; 694 vmlogrdr_class = NULL;
709 goto out_attr; 695 goto out_attr;
@@ -871,12 +857,10 @@ static int __init vmlogrdr_init(void)
871 rc = vmlogrdr_register_cdev(dev); 857 rc = vmlogrdr_register_cdev(dev);
872 if (rc) 858 if (rc)
873 goto cleanup; 859 goto cleanup;
874 printk (KERN_INFO "vmlogrdr: driver loaded\n");
875 return 0; 860 return 0;
876 861
877cleanup: 862cleanup:
878 vmlogrdr_cleanup(); 863 vmlogrdr_cleanup();
879 printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
880 return rc; 864 return rc;
881} 865}
882 866
@@ -884,7 +868,6 @@ cleanup:
884static void __exit vmlogrdr_exit(void) 868static void __exit vmlogrdr_exit(void)
885{ 869{
886 vmlogrdr_cleanup(); 870 vmlogrdr_cleanup();
887 printk (KERN_INFO "vmlogrdr: driver unloaded\n");
888 return; 871 return;
889} 872}
890 873
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 83ae9a852f00..0a9f1cccbe58 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/cdev.h> 11#include <linux/cdev.h>
12#include <linux/smp_lock.h>
12 13
13#include <asm/uaccess.h> 14#include <asm/uaccess.h>
14#include <asm/cio.h> 15#include <asm/cio.h>
@@ -277,7 +278,8 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
277 struct urdev *urd; 278 struct urdev *urd;
278 279
279 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", 280 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
280 intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count); 281 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
282 irb->scsw.cmd.count);
281 283
282 if (!intparm) { 284 if (!intparm) {
283 TRACE("ur_int_handler: unsolicited interrupt\n"); 285 TRACE("ur_int_handler: unsolicited interrupt\n");
@@ -288,7 +290,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
288 /* On special conditions irb is an error pointer */ 290 /* On special conditions irb is an error pointer */
289 if (IS_ERR(irb)) 291 if (IS_ERR(irb))
290 urd->io_request_rc = PTR_ERR(irb); 292 urd->io_request_rc = PTR_ERR(irb);
291 else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) 293 else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
292 urd->io_request_rc = 0; 294 urd->io_request_rc = 0;
293 else 295 else
294 urd->io_request_rc = -EIO; 296 urd->io_request_rc = -EIO;
@@ -668,7 +670,7 @@ static int ur_open(struct inode *inode, struct file *file)
668 670
669 if (accmode == O_RDWR) 671 if (accmode == O_RDWR)
670 return -EACCES; 672 return -EACCES;
671 673 lock_kernel();
672 /* 674 /*
673 * We treat the minor number as the devno of the ur device 675 * We treat the minor number as the devno of the ur device
674 * to find in the driver tree. 676 * to find in the driver tree.
@@ -676,8 +678,10 @@ static int ur_open(struct inode *inode, struct file *file)
676 devno = MINOR(file->f_dentry->d_inode->i_rdev); 678 devno = MINOR(file->f_dentry->d_inode->i_rdev);
677 679
678 urd = urdev_get_from_devno(devno); 680 urd = urdev_get_from_devno(devno);
679 if (!urd) 681 if (!urd) {
680 return -ENXIO; 682 rc = -ENXIO;
683 goto out;
684 }
681 685
682 spin_lock(&urd->open_lock); 686 spin_lock(&urd->open_lock);
683 while (urd->open_flag) { 687 while (urd->open_flag) {
@@ -720,6 +724,7 @@ static int ur_open(struct inode *inode, struct file *file)
720 goto fail_urfile_free; 724 goto fail_urfile_free;
721 urf->file_reclen = rc; 725 urf->file_reclen = rc;
722 file->private_data = urf; 726 file->private_data = urf;
727 unlock_kernel();
723 return 0; 728 return 0;
724 729
725fail_urfile_free: 730fail_urfile_free:
@@ -730,6 +735,8 @@ fail_unlock:
730 spin_unlock(&urd->open_lock); 735 spin_unlock(&urd->open_lock);
731fail_put: 736fail_put:
732 urdev_put(urd); 737 urdev_put(urd);
738out:
739 unlock_kernel();
733 return rc; 740 return rc;
734} 741}
735 742
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 19f8389291b6..21a2a829bf4e 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/watchdog.h> 15#include <linux/watchdog.h>
16#include <linux/smp_lock.h>
16 17
17#include <asm/ebcdic.h> 18#include <asm/ebcdic.h>
18#include <asm/io.h> 19#include <asm/io.h>
@@ -92,23 +93,15 @@ static int vmwdt_keepalive(void)
92 93
93 func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; 94 func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
94 ret = __diag288(func, vmwdt_interval, ebc_cmd, len); 95 ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
96 WARN_ON(ret != 0);
95 kfree(ebc_cmd); 97 kfree(ebc_cmd);
96
97 if (ret) {
98 printk(KERN_WARNING "%s: problem setting interval %d, "
99 "cmd %s\n", __func__, vmwdt_interval,
100 vmwdt_cmd);
101 }
102 return ret; 98 return ret;
103} 99}
104 100
105static int vmwdt_disable(void) 101static int vmwdt_disable(void)
106{ 102{
107 int ret = __diag288(wdt_cancel, 0, "", 0); 103 int ret = __diag288(wdt_cancel, 0, "", 0);
108 if (ret) { 104 WARN_ON(ret != 0);
109 printk(KERN_WARNING "%s: problem disabling watchdog\n",
110 __func__);
111 }
112 return ret; 105 return ret;
113} 106}
114 107
@@ -121,21 +114,23 @@ static int __init vmwdt_probe(void)
121 static char __initdata ebc_begin[] = { 114 static char __initdata ebc_begin[] = {
122 194, 197, 199, 201, 213 115 194, 197, 199, 201, 213
123 }; 116 };
124 if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) { 117 if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
125 printk(KERN_INFO "z/VM watchdog not available\n");
126 return -EINVAL; 118 return -EINVAL;
127 }
128 return vmwdt_disable(); 119 return vmwdt_disable();
129} 120}
130 121
131static int vmwdt_open(struct inode *i, struct file *f) 122static int vmwdt_open(struct inode *i, struct file *f)
132{ 123{
133 int ret; 124 int ret;
134 if (test_and_set_bit(0, &vmwdt_is_open)) 125 lock_kernel();
126 if (test_and_set_bit(0, &vmwdt_is_open)) {
127 unlock_kernel();
135 return -EBUSY; 128 return -EBUSY;
129 }
136 ret = vmwdt_keepalive(); 130 ret = vmwdt_keepalive();
137 if (ret) 131 if (ret)
138 clear_bit(0, &vmwdt_is_open); 132 clear_bit(0, &vmwdt_is_open);
133 unlock_kernel();
139 return ret ? ret : nonseekable_open(i, f); 134 return ret ? ret : nonseekable_open(i, f);
140} 135}
141 136
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index bbbd14e9d48f..047dd92ae804 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -223,12 +223,10 @@ static int __init init_cpu_info(enum arch_id arch)
223 /* get info for boot cpu from lowcore, stored in the HSA */ 223 /* get info for boot cpu from lowcore, stored in the HSA */
224 224
225 sa = kmalloc(sizeof(*sa), GFP_KERNEL); 225 sa = kmalloc(sizeof(*sa), GFP_KERNEL);
226 if (!sa) { 226 if (!sa)
227 ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
228 return -ENOMEM; 227 return -ENOMEM;
229 }
230 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { 228 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
231 ERROR_MSG("could not copy from HSA\n"); 229 TRACE("could not copy from HSA\n");
232 kfree(sa); 230 kfree(sa);
233 return -EIO; 231 return -EIO;
234 } 232 }
@@ -511,6 +509,8 @@ static void __init set_s390x_lc_mask(union save_area *map)
511 */ 509 */
512static int __init sys_info_init(enum arch_id arch) 510static int __init sys_info_init(enum arch_id arch)
513{ 511{
512 int rc;
513
514 switch (arch) { 514 switch (arch) {
515 case ARCH_S390X: 515 case ARCH_S390X:
516 MSG("DETECTED 'S390X (64 bit) OS'\n"); 516 MSG("DETECTED 'S390X (64 bit) OS'\n");
@@ -529,10 +529,9 @@ static int __init sys_info_init(enum arch_id arch)
529 return -EINVAL; 529 return -EINVAL;
530 } 530 }
531 sys_info.arch = arch; 531 sys_info.arch = arch;
532 if (init_cpu_info(arch)) { 532 rc = init_cpu_info(arch);
533 ERROR_MSG("get cpu info failed\n"); 533 if (rc)
534 return -ENOMEM; 534 return rc;
535 }
536 sys_info.mem_size = real_memory_size; 535 sys_info.mem_size = real_memory_size;
537 536
538 return 0; 537 return 0;
@@ -544,12 +543,12 @@ static int __init check_sdias(void)
544 543
545 rc = sclp_sdias_blk_count(); 544 rc = sclp_sdias_blk_count();
546 if (rc < 0) { 545 if (rc < 0) {
547 ERROR_MSG("Could not determine HSA size\n"); 546 TRACE("Could not determine HSA size\n");
548 return rc; 547 return rc;
549 } 548 }
550 act_hsa_size = (rc - 1) * PAGE_SIZE; 549 act_hsa_size = (rc - 1) * PAGE_SIZE;
551 if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { 550 if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
552 ERROR_MSG("HSA size too small: %i\n", act_hsa_size); 551 TRACE("HSA size too small: %i\n", act_hsa_size);
553 return -EINVAL; 552 return -EINVAL;
554 } 553 }
555 return 0; 554 return 0;
@@ -590,16 +589,12 @@ static int __init zcore_init(void)
590 goto fail; 589 goto fail;
591 590
592 rc = check_sdias(); 591 rc = check_sdias();
593 if (rc) { 592 if (rc)
594 ERROR_MSG("Dump initialization failed\n");
595 goto fail; 593 goto fail;
596 }
597 594
598 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); 595 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
599 if (rc) { 596 if (rc)
600 ERROR_MSG("sdial memcpy for arch id failed\n");
601 goto fail; 597 goto fail;
602 }
603 598
604#ifndef __s390x__ 599#ifndef __s390x__
605 if (arch == ARCH_S390X) { 600 if (arch == ARCH_S390X) {
@@ -610,10 +605,8 @@ static int __init zcore_init(void)
610#endif 605#endif
611 606
612 rc = sys_info_init(arch); 607 rc = sys_info_init(arch);
613 if (rc) { 608 if (rc)
614 ERROR_MSG("arch init failed\n");
615 goto fail; 609 goto fail;
616 }
617 610
618 zcore_header_init(arch, &zcore_header); 611 zcore_header_init(arch, &zcore_header);
619 612
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index cfaf77b320f5..91e9e3f3073a 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,9 +2,11 @@
2# Makefile for the S/390 common i/o drivers 2# Makefile for the S/390 common i/o drivers
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
6 fcx.o itcw.o
6ccw_device-objs += device.o device_fsm.o device_ops.o 7ccw_device-objs += device.o device_fsm.o device_ops.o
7ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
8obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
9obj-$(CONFIG_CCWGROUP) += ccwgroup.o 11obj-$(CONFIG_CCWGROUP) += ccwgroup.o
10obj-$(CONFIG_QDIO) += qdio.o 12obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index b7a07a866291..fe6cea15bbaf 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -15,6 +15,7 @@
15#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
16 16
17#include <asm/airq.h> 17#include <asm/airq.h>
18#include <asm/isc.h>
18 19
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -33,15 +34,15 @@ struct airq_t {
33 void *drv_data; 34 void *drv_data;
34}; 35};
35 36
36static union indicator_t indicators; 37static union indicator_t indicators[MAX_ISC];
37static struct airq_t *airqs[NR_AIRQS]; 38static struct airq_t *airqs[MAX_ISC][NR_AIRQS];
38 39
39static int register_airq(struct airq_t *airq) 40static int register_airq(struct airq_t *airq, u8 isc)
40{ 41{
41 int i; 42 int i;
42 43
43 for (i = 0; i < NR_AIRQS; i++) 44 for (i = 0; i < NR_AIRQS; i++)
44 if (!cmpxchg(&airqs[i], NULL, airq)) 45 if (!cmpxchg(&airqs[isc][i], NULL, airq))
45 return i; 46 return i;
46 return -ENOMEM; 47 return -ENOMEM;
47} 48}
@@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq)
50 * s390_register_adapter_interrupt() - register adapter interrupt handler 51 * s390_register_adapter_interrupt() - register adapter interrupt handler
51 * @handler: adapter handler to be registered 52 * @handler: adapter handler to be registered
52 * @drv_data: driver data passed with each call to the handler 53 * @drv_data: driver data passed with each call to the handler
54 * @isc: isc for which the handler should be called
53 * 55 *
54 * Returns: 56 * Returns:
55 * Pointer to the indicator to be used on success 57 * Pointer to the indicator to be used on success
56 * ERR_PTR() if registration failed 58 * ERR_PTR() if registration failed
57 */ 59 */
58void *s390_register_adapter_interrupt(adapter_int_handler_t handler, 60void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
59 void *drv_data) 61 void *drv_data, u8 isc)
60{ 62{
61 struct airq_t *airq; 63 struct airq_t *airq;
62 char dbf_txt[16]; 64 char dbf_txt[16];
63 int ret; 65 int ret;
64 66
67 if (isc > MAX_ISC)
68 return ERR_PTR(-EINVAL);
65 airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); 69 airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
66 if (!airq) { 70 if (!airq) {
67 ret = -ENOMEM; 71 ret = -ENOMEM;
@@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
69 } 73 }
70 airq->handler = handler; 74 airq->handler = handler;
71 airq->drv_data = drv_data; 75 airq->drv_data = drv_data;
72 ret = register_airq(airq); 76
73 if (ret < 0) 77 ret = register_airq(airq, isc);
74 kfree(airq);
75out: 78out:
76 snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); 79 snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
77 CIO_TRACE_EVENT(4, dbf_txt); 80 CIO_TRACE_EVENT(4, dbf_txt);
78 if (ret < 0) 81 if (ret < 0) {
82 kfree(airq);
79 return ERR_PTR(ret); 83 return ERR_PTR(ret);
80 else 84 } else
81 return &indicators.byte[ret]; 85 return &indicators[isc].byte[ret];
82} 86}
83EXPORT_SYMBOL(s390_register_adapter_interrupt); 87EXPORT_SYMBOL(s390_register_adapter_interrupt);
84 88
85/** 89/**
86 * s390_unregister_adapter_interrupt - unregister adapter interrupt handler 90 * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
87 * @ind: indicator for which the handler is to be unregistered 91 * @ind: indicator for which the handler is to be unregistered
92 * @isc: interruption subclass
88 */ 93 */
89void s390_unregister_adapter_interrupt(void *ind) 94void s390_unregister_adapter_interrupt(void *ind, u8 isc)
90{ 95{
91 struct airq_t *airq; 96 struct airq_t *airq;
92 char dbf_txt[16]; 97 char dbf_txt[16];
93 int i; 98 int i;
94 99
95 i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]); 100 i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
96 snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); 101 snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
97 CIO_TRACE_EVENT(4, dbf_txt); 102 CIO_TRACE_EVENT(4, dbf_txt);
98 indicators.byte[i] = 0; 103 indicators[isc].byte[i] = 0;
99 airq = xchg(&airqs[i], NULL); 104 airq = xchg(&airqs[isc][i], NULL);
100 /* 105 /*
101 * Allow interrupts to complete. This will ensure that the airq handle 106 * Allow interrupts to complete. This will ensure that the airq handle
102 * is no longer referenced by any interrupt handler. 107 * is no longer referenced by any interrupt handler.
@@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
108 113
109#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) 114#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
110 115
111void do_adapter_IO(void) 116void do_adapter_IO(u8 isc)
112{ 117{
113 int w; 118 int w;
114 int i; 119 int i;
@@ -120,22 +125,22 @@ void do_adapter_IO(void)
120 * fetch operations. 125 * fetch operations.
121 */ 126 */
122 for (w = 0; w < NR_AIRQ_WORDS; w++) { 127 for (w = 0; w < NR_AIRQ_WORDS; w++) {
123 word = indicators.word[w]; 128 word = indicators[isc].word[w];
124 i = w * NR_AIRQS_PER_WORD; 129 i = w * NR_AIRQS_PER_WORD;
125 /* 130 /*
126 * Check bytes within word for active indicators. 131 * Check bytes within word for active indicators.
127 */ 132 */
128 while (word) { 133 while (word) {
129 if (word & INDICATOR_MASK) { 134 if (word & INDICATOR_MASK) {
130 airq = airqs[i]; 135 airq = airqs[isc][i];
131 if (likely(airq)) 136 if (likely(airq))
132 airq->handler(&indicators.byte[i], 137 airq->handler(&indicators[isc].byte[i],
133 airq->drv_data); 138 airq->drv_data);
134 else 139 else
135 /* 140 /*
136 * Reset ill-behaved indicator. 141 * Reset ill-behaved indicator.
137 */ 142 */
138 indicators.byte[i] = 0; 143 indicators[isc].byte[i] = 0;
139 } 144 }
140 word <<= 8; 145 word <<= 8;
141 i++; 146 i++;
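
register_airq() above claims a free slot in the per-ISC handler table with cmpxchg(), so registration needs no lock; the indicator arrays are likewise indexed by interruption subclass first. A small userspace analogue of the lock-free slot claim, using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define NR_SLOTS 32

static _Atomic(void *) slots[NR_SLOTS];

/* returns the claimed index, or -1 if every slot is taken */
static int claim_slot(void *owner)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		void *expected = NULL;

		/* succeeds only if the slot is still empty */
		if (atomic_compare_exchange_strong(&slots[i], &expected, owner))
			return i;
	}
	return -1;
}

int main(void)
{
	int handler_data = 42;

	printf("claimed slot %d\n", claim_slot(&handler_data));
	return 0;
}
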
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 297cdceb0ca4..db00b0591733 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -18,6 +18,7 @@
18#include <asm/chpid.h> 18#include <asm/chpid.h>
19#include <asm/sclp.h> 19#include <asm/sclp.h>
20 20
21#include "../s390mach.h"
21#include "cio.h" 22#include "cio.h"
22#include "css.h" 23#include "css.h"
23#include "ioasm.h" 24#include "ioasm.h"
@@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
94 } 95 }
95 return opm; 96 return opm;
96} 97}
98EXPORT_SYMBOL_GPL(chp_get_sch_opm);
97 99
98/** 100/**
99 * chp_is_registered - check if a channel-path is registered 101 * chp_is_registered - check if a channel-path is registered
@@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
121 CIO_TRACE_EVENT(2, dbf_text); 123 CIO_TRACE_EVENT(2, dbf_text);
122 124
123 status = chp_get_status(chpid); 125 status = chp_get_status(chpid);
124 if (!on && !status) { 126 if (!on && !status)
125 printk(KERN_ERR "cio: chpid %x.%02x is already offline\n", 127 return 0;
126 chpid.cssid, chpid.id);
127 return -EINVAL;
128 }
129 128
130 set_chp_logically_online(chpid, on); 129 set_chp_logically_online(chpid, on);
131 chsc_chp_vary(chpid, on); 130 chsc_chp_vary(chpid, on);
@@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
141{ 140{
142 struct channel_path *chp; 141 struct channel_path *chp;
143 struct device *device; 142 struct device *device;
144 unsigned int size;
145 143
146 device = container_of(kobj, struct device, kobj); 144 device = container_of(kobj, struct device, kobj);
147 chp = to_channelpath(device); 145 chp = to_channelpath(device);
148 if (!chp->cmg_chars) 146 if (!chp->cmg_chars)
149 return 0; 147 return 0;
150 148
151 size = sizeof(struct cmg_chars); 149 return memory_read_from_buffer(buf, count, &off,
152 150 chp->cmg_chars, sizeof(struct cmg_chars));
153 if (off > size)
154 return 0;
155 if (off + count > size)
156 count = size - off;
157 memcpy(buf, chp->cmg_chars + off, count);
158 return count;
159} 151}
160 152
161static struct bin_attribute chp_measurement_chars_attr = { 153static struct bin_attribute chp_measurement_chars_attr = {
@@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid)
405 chpid.id); 397 chpid.id);
406 398
407 /* Obtain channel path description and fill it in. */ 399 /* Obtain channel path description and fill it in. */
408 ret = chsc_determine_channel_path_description(chpid, &chp->desc); 400 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
409 if (ret) 401 if (ret)
410 goto out_free; 402 goto out_free;
411 if ((chp->desc.flags & 0x80) == 0) { 403 if ((chp->desc.flags & 0x80) == 0) {
@@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid)
413 goto out_free; 405 goto out_free;
414 } 406 }
415 /* Get channel-measurement characteristics. */ 407 /* Get channel-measurement characteristics. */
416 if (css_characteristics_avail && css_chsc_characteristics.scmc 408 if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
417 && css_chsc_characteristics.secm) {
418 ret = chsc_get_channel_measurement_chars(chp); 409 ret = chsc_get_channel_measurement_chars(chp);
419 if (ret) 410 if (ret)
420 goto out_free; 411 goto out_free;
@@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid)
476 467
477/** 468/**
478 * chp_process_crw - process channel-path status change 469 * chp_process_crw - process channel-path status change
479 * @id: channel-path ID number 470 * @crw0: channel report-word to handler
480 * @status: non-zero if channel-path has become available, zero otherwise 471 * @crw1: second channel-report word (always NULL)
472 * @overflow: crw overflow indication
481 * 473 *
482 * Handle channel-report-words indicating that the status of a channel-path 474 * Handle channel-report-words indicating that the status of a channel-path
483 * has changed. 475 * has changed.
484 */ 476 */
485void chp_process_crw(int id, int status) 477static void chp_process_crw(struct crw *crw0, struct crw *crw1,
478 int overflow)
486{ 479{
487 struct chp_id chpid; 480 struct chp_id chpid;
488 481
482 if (overflow) {
483 css_schedule_eval_all();
484 return;
485 }
486 CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
487 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
488 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
489 crw0->erc, crw0->rsid);
490 /*
491 * Check for solicited machine checks. These are
492 * created by reset channel path and need not be
493 * handled here.
494 */
495 if (crw0->slct) {
496 CIO_CRW_EVENT(2, "solicited machine check for "
497 "channel path %02X\n", crw0->rsid);
498 return;
499 }
489 chp_id_init(&chpid); 500 chp_id_init(&chpid);
490 chpid.id = id; 501 chpid.id = crw0->rsid;
491 if (status) { 502 switch (crw0->erc) {
503 case CRW_ERC_IPARM: /* Path has come. */
492 if (!chp_is_registered(chpid)) 504 if (!chp_is_registered(chpid))
493 chp_new(chpid); 505 chp_new(chpid);
494 chsc_chp_online(chpid); 506 chsc_chp_online(chpid);
495 } else 507 break;
508 case CRW_ERC_PERRI: /* Path has gone. */
509 case CRW_ERC_PERRN:
496 chsc_chp_offline(chpid); 510 chsc_chp_offline(chpid);
511 break;
512 default:
513 CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
514 crw0->erc);
515 }
497} 516}
498 517
518int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
519{
520 int i;
521 int mask;
522
523 for (i = 0; i < 8; i++) {
524 mask = 0x80 >> i;
525 if (!(ssd->path_mask & mask))
526 continue;
527 if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
528 continue;
529 if ((ssd->fla_valid_mask & mask) &&
530 ((ssd->fla[i] & link->fla_mask) != link->fla))
531 continue;
532 return mask;
533 }
534 return 0;
535}
536EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
537
499static inline int info_bit_num(struct chp_id id) 538static inline int info_bit_num(struct chp_id id)
500{ 539{
501 return id.id + id.cssid * (__MAX_CHPID + 1); 540 return id.id + id.cssid * (__MAX_CHPID + 1);
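
chp_ssd_get_mask(), added in the hunk above, encodes the rule for matching a subchannel path against a struct chp_link: the path must be installed in the SSD path mask, carry the same CHPID, and, if a link address is recorded for it, agree with the link's FLA under fla_mask. Below is a self-contained user-space rendering of that rule; the structures are stand-ins that reuse the patch's field names, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

struct chp_id { uint8_t cssid; uint8_t id; };

struct ssd_info {
        uint8_t path_mask;
        uint8_t fla_valid_mask;
        struct chp_id chpid[8];
        uint16_t fla[8];
};

struct chp_link {
        struct chp_id chpid;
        uint32_t fla_mask;
        uint16_t fla;
};

static int ssd_get_mask(const struct ssd_info *ssd, const struct chp_link *link)
{
        int i;

        for (i = 0; i < 8; i++) {
                int mask = 0x80 >> i;

                if (!(ssd->path_mask & mask))
                        continue;       /* path not installed */
                if (ssd->chpid[i].cssid != link->chpid.cssid ||
                    ssd->chpid[i].id != link->chpid.id)
                        continue;       /* different CHPID */
                if ((ssd->fla_valid_mask & mask) &&
                    ((ssd->fla[i] & link->fla_mask) != link->fla))
                        continue;       /* link address does not match */
                return mask;
        }
        return 0;
}

int main(void)
{
        struct ssd_info ssd = { .path_mask = 0xc0, .fla_valid_mask = 0x40 };
        struct chp_link link = { .chpid = { 0, 0x45 },
                                 .fla_mask = 0xff00, .fla = 0x1200 };

        ssd.chpid[1].id = 0x45;         /* second path uses CHPID 0.45 */
        ssd.fla[1] = 0x1234;            /* its link address is 0x12xx */
        printf("matching mask: 0x%02x\n", (unsigned)ssd_get_mask(&ssd, &link));
        return 0;
}
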
@@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work)
575{ 614{
576 struct chp_id chpid; 615 struct chp_id chpid;
577 enum cfg_task_t t; 616 enum cfg_task_t t;
617 int rc;
578 618
579 mutex_lock(&cfg_lock); 619 mutex_lock(&cfg_lock);
580 t = cfg_none; 620 t = cfg_none;
@@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work)
589 629
590 switch (t) { 630 switch (t) {
591 case cfg_configure: 631 case cfg_configure:
592 sclp_chp_configure(chpid); 632 rc = sclp_chp_configure(chpid);
593 info_expire(); 633 if (rc)
594 chsc_chp_online(chpid); 634 CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
635 "%d\n", chpid.cssid, chpid.id, rc);
636 else {
637 info_expire();
638 chsc_chp_online(chpid);
639 }
595 break; 640 break;
596 case cfg_deconfigure: 641 case cfg_deconfigure:
597 sclp_chp_deconfigure(chpid); 642 rc = sclp_chp_deconfigure(chpid);
598 info_expire(); 643 if (rc)
599 chsc_chp_offline(chpid); 644 CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
645 "%d\n", chpid.cssid, chpid.id, rc);
646 else {
647 info_expire();
648 chsc_chp_offline(chpid);
649 }
600 break; 650 break;
601 case cfg_none: 651 case cfg_none:
602 /* Get updated information after last change. */ 652 /* Get updated information after last change. */
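
The cfg_func() changes above check the SCLP configure/deconfigure call, only log its failure, and run the follow-up work (expiring cached info, reporting the state change) solely on success. A minimal stand-alone sketch of that shape, with placeholder functions in place of the SCLP and CIO calls:

#include <stdio.h>

/* Placeholders for sclp_chp_configure()/sclp_chp_deconfigure() and friends. */
static int fake_sclp_call(int on) { return on ? 0 : -5; /* pretend a failure */ }
static void log_rc(const char *what, int rc) { printf("%s failed: rc=%d\n", what, rc); }
static void follow_up(void) { puts("expire cached info, report state change"); }

static void cfg_apply(int on)
{
        int rc = fake_sclp_call(on);

        if (rc)
                log_rc(on ? "configure" : "deconfigure", rc);
        else
                follow_up();
}

int main(void)
{
        cfg_apply(1);   /* succeeds: follow-up work runs */
        cfg_apply(0);   /* fails: only a message is emitted */
        return 0;
}
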
@@ -654,10 +704,16 @@ static int cfg_wait_idle(void)
654static int __init chp_init(void) 704static int __init chp_init(void)
655{ 705{
656 struct chp_id chpid; 706 struct chp_id chpid;
707 int ret;
657 708
709 ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw);
710 if (ret)
711 return ret;
658 chp_wq = create_singlethread_workqueue("cio_chp"); 712 chp_wq = create_singlethread_workqueue("cio_chp");
659 if (!chp_wq) 713 if (!chp_wq) {
714 s390_unregister_crw_handler(CRW_RSC_CPATH);
660 return -ENOMEM; 715 return -ENOMEM;
716 }
661 INIT_WORK(&cfg_work, cfg_func); 717 INIT_WORK(&cfg_work, cfg_func);
662 init_waitqueue_head(&cfg_wait_queue); 718 init_waitqueue_head(&cfg_wait_queue);
663 if (info_update()) 719 if (info_update())
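
chp_init() now registers the channel-path CRW handler before any other setup and has to undo that registration if creating the workqueue fails. The fragment below models just that register-then-roll-back ordering; register_handler(), unregister_handler() and fake_create_workqueue() are stand-ins, not the s390_register_crw_handler() prototypes.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int handler_registered;

static int register_handler(void) { handler_registered = 1; return 0; }
static void unregister_handler(void) { handler_registered = 0; }

/* Fallible second init step standing in for create_singlethread_workqueue(). */
static void *fake_create_workqueue(int fail) { return fail ? NULL : malloc(1); }

static int chp_init_model(int fail_wq)
{
        void *wq;
        int ret = register_handler();

        if (ret)
                return ret;
        wq = fake_create_workqueue(fail_wq);
        if (!wq) {
                unregister_handler();   /* roll back the earlier step */
                return -ENOMEM;
        }
        free(wq);
        return 0;
}

int main(void)
{
        int rc;

        rc = chp_init_model(0);
        printf("ok path:   rc=%d registered=%d\n", rc, handler_registered);
        rc = chp_init_model(1);
        printf("fail path: rc=%d registered=%d\n", rc, handler_registered);
        return 0;
}
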
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 65286563c592..26c3d2246176 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -12,12 +12,24 @@
12#include <linux/device.h> 12#include <linux/device.h>
13#include <asm/chpid.h> 13#include <asm/chpid.h>
14#include "chsc.h" 14#include "chsc.h"
15#include "css.h"
15 16
16#define CHP_STATUS_STANDBY 0 17#define CHP_STATUS_STANDBY 0
17#define CHP_STATUS_CONFIGURED 1 18#define CHP_STATUS_CONFIGURED 1
18#define CHP_STATUS_RESERVED 2 19#define CHP_STATUS_RESERVED 2
19#define CHP_STATUS_NOT_RECOGNIZED 3 20#define CHP_STATUS_NOT_RECOGNIZED 3
20 21
22#define CHP_ONLINE 0
23#define CHP_OFFLINE 1
24#define CHP_VARY_ON 2
25#define CHP_VARY_OFF 3
26
27struct chp_link {
28 struct chp_id chpid;
29 u32 fla_mask;
30 u16 fla;
31};
32
21static inline int chp_test_bit(u8 *bitmap, int num) 33static inline int chp_test_bit(u8 *bitmap, int num)
22{ 34{
23 int byte = num >> 3; 35 int byte = num >> 3;
@@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid);
42u8 chp_get_sch_opm(struct subchannel *sch); 54u8 chp_get_sch_opm(struct subchannel *sch);
43int chp_is_registered(struct chp_id chpid); 55int chp_is_registered(struct chp_id chpid);
44void *chp_get_chp_desc(struct chp_id chpid); 56void *chp_get_chp_desc(struct chp_id chpid);
45void chp_process_crw(int id, int available);
46void chp_remove_cmg_attr(struct channel_path *chp); 57void chp_remove_cmg_attr(struct channel_path *chp);
47int chp_add_cmg_attr(struct channel_path *chp); 58int chp_add_cmg_attr(struct channel_path *chp);
48int chp_new(struct chp_id chpid); 59int chp_new(struct chp_id chpid);
49void chp_cfg_schedule(struct chp_id chpid, int configure); 60void chp_cfg_schedule(struct chp_id chpid, int configure);
50void chp_cfg_cancel_deconfigure(struct chp_id chpid); 61void chp_cfg_cancel_deconfigure(struct chp_id chpid);
51int chp_info_get_status(struct chp_id chpid); 62int chp_info_get_status(struct chp_id chpid);
52 63int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
53#endif /* S390_CHP_H */ 64#endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 5de86908b0d0..65264a38057d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/chsc.c 2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call 3 * S/390 common I/O routines -- channel subsystem call
4 * 4 *
5 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 1999,2008
6 * IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -16,7 +15,9 @@
16 15
17#include <asm/cio.h> 16#include <asm/cio.h>
18#include <asm/chpid.h> 17#include <asm/chpid.h>
18#include <asm/chsc.h>
19 19
20#include "../s390mach.h"
20#include "css.h" 21#include "css.h"
21#include "cio.h" 22#include "cio.h"
22#include "cio_debug.h" 23#include "cio_debug.h"
@@ -127,77 +128,12 @@ out_free:
127 return ret; 128 return ret;
128} 129}
129 130
130static int check_for_io_on_path(struct subchannel *sch, int mask)
131{
132 int cc;
133
134 cc = stsch(sch->schid, &sch->schib);
135 if (cc)
136 return 0;
137 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
138 return 1;
139 return 0;
140}
141
142static void terminate_internal_io(struct subchannel *sch)
143{
144 if (cio_clear(sch)) {
145 /* Recheck device in case clear failed. */
146 sch->lpm = 0;
147 if (device_trigger_verify(sch) != 0)
148 css_schedule_eval(sch->schid);
149 return;
150 }
151 /* Request retry of internal operation. */
152 device_set_intretry(sch);
153 /* Call handler. */
154 if (sch->driver && sch->driver->termination)
155 sch->driver->termination(sch);
156}
157
158static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) 131static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
159{ 132{
160 int j;
161 int mask;
162 struct chp_id *chpid = data;
163 struct schib schib;
164
165 for (j = 0; j < 8; j++) {
166 mask = 0x80 >> j;
167 if ((sch->schib.pmcw.pim & mask) &&
168 (sch->schib.pmcw.chpid[j] == chpid->id))
169 break;
170 }
171 if (j >= 8)
172 return 0;
173
174 spin_lock_irq(sch->lock); 133 spin_lock_irq(sch->lock);
175 134 if (sch->driver && sch->driver->chp_event)
176 stsch(sch->schid, &schib); 135 if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
177 if (!css_sch_is_valid(&schib))
178 goto out_unreg;
179 memcpy(&sch->schib, &schib, sizeof(struct schib));
180 /* Check for single path devices. */
181 if (sch->schib.pmcw.pim == 0x80)
182 goto out_unreg;
183
184 if (check_for_io_on_path(sch, mask)) {
185 if (device_is_online(sch))
186 device_kill_io(sch);
187 else {
188 terminate_internal_io(sch);
189 /* Re-start path verification. */
190 if (sch->driver && sch->driver->verify)
191 sch->driver->verify(sch);
192 }
193 } else {
194 /* trigger path verification. */
195 if (sch->driver && sch->driver->verify)
196 sch->driver->verify(sch);
197 else if (sch->lpm == mask)
198 goto out_unreg; 136 goto out_unreg;
199 }
200
201 spin_unlock_irq(sch->lock); 137 spin_unlock_irq(sch->lock);
202 return 0; 138 return 0;
203 139
@@ -211,15 +147,18 @@ out_unreg:
211void chsc_chp_offline(struct chp_id chpid) 147void chsc_chp_offline(struct chp_id chpid)
212{ 148{
213 char dbf_txt[15]; 149 char dbf_txt[15];
150 struct chp_link link;
214 151
215 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); 152 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
216 CIO_TRACE_EVENT(2, dbf_txt); 153 CIO_TRACE_EVENT(2, dbf_txt);
217 154
218 if (chp_get_status(chpid) <= 0) 155 if (chp_get_status(chpid) <= 0)
219 return; 156 return;
157 memset(&link, 0, sizeof(struct chp_link));
158 link.chpid = chpid;
220 /* Wait until previous actions have settled. */ 159 /* Wait until previous actions have settled. */
221 css_wait_for_slow_path(); 160 css_wait_for_slow_path();
222 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); 161 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
223} 162}
224 163
225static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) 164static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
@@ -242,67 +181,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
242 return 0; 181 return 0;
243} 182}
244 183
245struct res_acc_data {
246 struct chp_id chpid;
247 u32 fla_mask;
248 u16 fla;
249};
250
251static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
252 struct res_acc_data *data)
253{
254 int i;
255 int mask;
256
257 for (i = 0; i < 8; i++) {
258 mask = 0x80 >> i;
259 if (!(ssd->path_mask & mask))
260 continue;
261 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
262 continue;
263 if ((ssd->fla_valid_mask & mask) &&
264 ((ssd->fla[i] & data->fla_mask) != data->fla))
265 continue;
266 return mask;
267 }
268 return 0;
269}
270
271static int __s390_process_res_acc(struct subchannel *sch, void *data) 184static int __s390_process_res_acc(struct subchannel *sch, void *data)
272{ 185{
273 int chp_mask, old_lpm;
274 struct res_acc_data *res_data = data;
275
276 spin_lock_irq(sch->lock); 186 spin_lock_irq(sch->lock);
277 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); 187 if (sch->driver && sch->driver->chp_event)
278 if (chp_mask == 0) 188 sch->driver->chp_event(sch, data, CHP_ONLINE);
279 goto out;
280 if (stsch(sch->schid, &sch->schib))
281 goto out;
282 old_lpm = sch->lpm;
283 sch->lpm = ((sch->schib.pmcw.pim &
284 sch->schib.pmcw.pam &
285 sch->schib.pmcw.pom)
286 | chp_mask) & sch->opm;
287 if (!old_lpm && sch->lpm)
288 device_trigger_reprobe(sch);
289 else if (sch->driver && sch->driver->verify)
290 sch->driver->verify(sch);
291out:
292 spin_unlock_irq(sch->lock); 189 spin_unlock_irq(sch->lock);
293 190
294 return 0; 191 return 0;
295} 192}
296 193
297static void s390_process_res_acc (struct res_acc_data *res_data) 194static void s390_process_res_acc(struct chp_link *link)
298{ 195{
299 char dbf_txt[15]; 196 char dbf_txt[15];
300 197
301 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, 198 sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
302 res_data->chpid.id); 199 link->chpid.id);
303 CIO_TRACE_EVENT( 2, dbf_txt); 200 CIO_TRACE_EVENT( 2, dbf_txt);
304 if (res_data->fla != 0) { 201 if (link->fla != 0) {
305 sprintf(dbf_txt, "fla%x", res_data->fla); 202 sprintf(dbf_txt, "fla%x", link->fla);
306 CIO_TRACE_EVENT( 2, dbf_txt); 203 CIO_TRACE_EVENT( 2, dbf_txt);
307 } 204 }
308 /* Wait until previous actions have settled. */ 205 /* Wait until previous actions have settled. */
@@ -315,7 +212,7 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
315 * will we have to do. 212 * will we have to do.
316 */ 213 */
317 for_each_subchannel_staged(__s390_process_res_acc, 214 for_each_subchannel_staged(__s390_process_res_acc,
318 s390_process_res_acc_new_sch, res_data); 215 s390_process_res_acc_new_sch, link);
319} 216}
320 217
321static int 218static int
@@ -388,7 +285,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
388 285
389static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) 286static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
390{ 287{
391 struct res_acc_data res_data; 288 struct chp_link link;
392 struct chp_id chpid; 289 struct chp_id chpid;
393 int status; 290 int status;
394 291
@@ -404,18 +301,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
404 chp_new(chpid); 301 chp_new(chpid);
405 else if (!status) 302 else if (!status)
406 return; 303 return;
407 memset(&res_data, 0, sizeof(struct res_acc_data)); 304 memset(&link, 0, sizeof(struct chp_link));
408 res_data.chpid = chpid; 305 link.chpid = chpid;
409 if ((sei_area->vf & 0xc0) != 0) { 306 if ((sei_area->vf & 0xc0) != 0) {
410 res_data.fla = sei_area->fla; 307 link.fla = sei_area->fla;
411 if ((sei_area->vf & 0xc0) == 0xc0) 308 if ((sei_area->vf & 0xc0) == 0xc0)
412 /* full link address */ 309 /* full link address */
413 res_data.fla_mask = 0xffff; 310 link.fla_mask = 0xffff;
414 else 311 else
415 /* link address */ 312 /* link address */
416 res_data.fla_mask = 0xff00; 313 link.fla_mask = 0xff00;
417 } 314 }
418 s390_process_res_acc(&res_data); 315 s390_process_res_acc(&link);
419} 316}
420 317
421struct chp_config_data { 318struct chp_config_data {
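
For reference, the res_acc handler above derives the new chp_link's FLA pair from the SEI validity flags: vf bits 0xc0 both set means a full link address is valid (compare all 16 bits, mask 0xffff), any other non-zero value means only the link address is valid (compare the high byte, mask 0xff00). A small user-space sketch of that derivation, with simplified types and the patch's field names:

#include <stdio.h>
#include <stdint.h>

struct chp_link {                       /* stand-in, simplified types */
        uint32_t fla_mask;
        uint16_t fla;
};

static void link_from_sei(struct chp_link *link, uint8_t vf, uint16_t fla)
{
        link->fla = 0;
        link->fla_mask = 0;
        if ((vf & 0xc0) == 0)
                return;                 /* no link address reported */
        link->fla = fla;
        link->fla_mask = ((vf & 0xc0) == 0xc0)
                        ? 0xffff        /* full link address: compare all bits */
                        : 0xff00;       /* link address only: compare high byte */
}

int main(void)
{
        struct chp_link link;

        link_from_sei(&link, 0xc0, 0x1234);
        printf("full:    fla=%04x mask=%04x\n", (unsigned)link.fla, (unsigned)link.fla_mask);
        link_from_sei(&link, 0x80, 0x1200);
        printf("partial: fla=%04x mask=%04x\n", (unsigned)link.fla, (unsigned)link.fla_mask);
        return 0;
}
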
@@ -480,17 +377,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
480 } 377 }
481} 378}
482 379
483void chsc_process_crw(void) 380static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
484{ 381{
485 struct chsc_sei_area *sei_area; 382 struct chsc_sei_area *sei_area;
486 383
384 if (overflow) {
385 css_schedule_eval_all();
386 return;
387 }
388 CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
389 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
390 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
391 crw0->erc, crw0->rsid);
487 if (!sei_page) 392 if (!sei_page)
488 return; 393 return;
489 /* Access to sei_page is serialized through machine check handler 394 /* Access to sei_page is serialized through machine check handler
490 * thread, so no need for locking. */ 395 * thread, so no need for locking. */
491 sei_area = sei_page; 396 sei_area = sei_page;
492 397
493 CIO_TRACE_EVENT( 2, "prcss"); 398 CIO_TRACE_EVENT(2, "prcss");
494 do { 399 do {
495 memset(sei_area, 0, sizeof(*sei_area)); 400 memset(sei_area, 0, sizeof(*sei_area));
496 sei_area->request.length = 0x0010; 401 sei_area->request.length = 0x0010;
@@ -509,114 +414,36 @@ void chsc_process_crw(void)
509 } while (sei_area->flags & 0x80); 414 } while (sei_area->flags & 0x80);
510} 415}
511 416
512static int __chp_add_new_sch(struct subchannel_id schid, void *data)
513{
514 struct schib schib;
515
516 if (stsch_err(schid, &schib))
517 /* We're through */
518 return -ENXIO;
519
520 /* Put it on the slow path. */
521 css_schedule_eval(schid);
522 return 0;
523}
524
525
526static int __chp_add(struct subchannel *sch, void *data)
527{
528 int i, mask;
529 struct chp_id *chpid = data;
530
531 spin_lock_irq(sch->lock);
532 for (i=0; i<8; i++) {
533 mask = 0x80 >> i;
534 if ((sch->schib.pmcw.pim & mask) &&
535 (sch->schib.pmcw.chpid[i] == chpid->id))
536 break;
537 }
538 if (i==8) {
539 spin_unlock_irq(sch->lock);
540 return 0;
541 }
542 if (stsch(sch->schid, &sch->schib)) {
543 spin_unlock_irq(sch->lock);
544 css_schedule_eval(sch->schid);
545 return 0;
546 }
547 sch->lpm = ((sch->schib.pmcw.pim &
548 sch->schib.pmcw.pam &
549 sch->schib.pmcw.pom)
550 | mask) & sch->opm;
551
552 if (sch->driver && sch->driver->verify)
553 sch->driver->verify(sch);
554
555 spin_unlock_irq(sch->lock);
556
557 return 0;
558}
559
560void chsc_chp_online(struct chp_id chpid) 417void chsc_chp_online(struct chp_id chpid)
561{ 418{
562 char dbf_txt[15]; 419 char dbf_txt[15];
420 struct chp_link link;
563 421
564 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); 422 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
565 CIO_TRACE_EVENT(2, dbf_txt); 423 CIO_TRACE_EVENT(2, dbf_txt);
566 424
567 if (chp_get_status(chpid) != 0) { 425 if (chp_get_status(chpid) != 0) {
426 memset(&link, 0, sizeof(struct chp_link));
427 link.chpid = chpid;
568 /* Wait until previous actions have settled. */ 428 /* Wait until previous actions have settled. */
569 css_wait_for_slow_path(); 429 css_wait_for_slow_path();
570 for_each_subchannel_staged(__chp_add, __chp_add_new_sch, 430 for_each_subchannel_staged(__s390_process_res_acc, NULL,
571 &chpid); 431 &link);
572 } 432 }
573} 433}
574 434
575static void __s390_subchannel_vary_chpid(struct subchannel *sch, 435static void __s390_subchannel_vary_chpid(struct subchannel *sch,
576 struct chp_id chpid, int on) 436 struct chp_id chpid, int on)
577{ 437{
578 int chp, old_lpm;
579 int mask;
580 unsigned long flags; 438 unsigned long flags;
439 struct chp_link link;
581 440
441 memset(&link, 0, sizeof(struct chp_link));
442 link.chpid = chpid;
582 spin_lock_irqsave(sch->lock, flags); 443 spin_lock_irqsave(sch->lock, flags);
583 old_lpm = sch->lpm; 444 if (sch->driver && sch->driver->chp_event)
584 for (chp = 0; chp < 8; chp++) { 445 sch->driver->chp_event(sch, &link,
585 mask = 0x80 >> chp; 446 on ? CHP_VARY_ON : CHP_VARY_OFF);
586 if (!(sch->ssd_info.path_mask & mask))
587 continue;
588 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
589 continue;
590
591 if (on) {
592 sch->opm |= mask;
593 sch->lpm |= mask;
594 if (!old_lpm)
595 device_trigger_reprobe(sch);
596 else if (sch->driver && sch->driver->verify)
597 sch->driver->verify(sch);
598 break;
599 }
600 sch->opm &= ~mask;
601 sch->lpm &= ~mask;
602 if (check_for_io_on_path(sch, mask)) {
603 if (device_is_online(sch))
604 /* Path verification is done after killing. */
605 device_kill_io(sch);
606 else {
607 /* Kill and retry internal I/O. */
608 terminate_internal_io(sch);
609 /* Re-start path verification. */
610 if (sch->driver && sch->driver->verify)
611 sch->driver->verify(sch);
612 }
613 } else if (!sch->lpm) {
614 if (device_trigger_verify(sch) != 0)
615 css_schedule_eval(sch->schid);
616 } else if (sch->driver && sch->driver->verify)
617 sch->driver->verify(sch);
618 break;
619 }
620 spin_unlock_irqrestore(sch->lock, flags); 447 spin_unlock_irqrestore(sch->lock, flags);
621} 448}
622 449
@@ -656,6 +483,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
656 */ 483 */
657int chsc_chp_vary(struct chp_id chpid, int on) 484int chsc_chp_vary(struct chp_id chpid, int on)
658{ 485{
486 struct chp_link link;
487
488 memset(&link, 0, sizeof(struct chp_link));
489 link.chpid = chpid;
659 /* Wait until previous actions have settled. */ 490 /* Wait until previous actions have settled. */
660 css_wait_for_slow_path(); 491 css_wait_for_slow_path();
661 /* 492 /*
@@ -664,10 +495,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
664 495
665 if (on) 496 if (on)
666 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 497 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
667 __s390_vary_chpid_on, &chpid); 498 __s390_vary_chpid_on, &link);
668 else 499 else
669 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 500 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
670 NULL, &chpid); 501 NULL, &link);
671 502
672 return 0; 503 return 0;
673} 504}
@@ -797,23 +628,33 @@ chsc_secm(struct channel_subsystem *css, int enable)
797 return ret; 628 return ret;
798} 629}
799 630
800int chsc_determine_channel_path_description(struct chp_id chpid, 631int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
801 struct channel_path_desc *desc) 632 int c, int m,
633 struct chsc_response_struct *resp)
802{ 634{
803 int ccode, ret; 635 int ccode, ret;
804 636
805 struct { 637 struct {
806 struct chsc_header request; 638 struct chsc_header request;
807 u32 : 24; 639 u32 : 2;
640 u32 m : 1;
641 u32 c : 1;
642 u32 fmt : 4;
643 u32 cssid : 8;
644 u32 : 4;
645 u32 rfmt : 4;
808 u32 first_chpid : 8; 646 u32 first_chpid : 8;
809 u32 : 24; 647 u32 : 24;
810 u32 last_chpid : 8; 648 u32 last_chpid : 8;
811 u32 zeroes1; 649 u32 zeroes1;
812 struct chsc_header response; 650 struct chsc_header response;
813 u32 zeroes2; 651 u8 data[PAGE_SIZE - 20];
814 struct channel_path_desc desc;
815 } __attribute__ ((packed)) *scpd_area; 652 } __attribute__ ((packed)) *scpd_area;
816 653
654 if ((rfmt == 1) && !css_general_characteristics.fcs)
655 return -EINVAL;
656 if ((rfmt == 2) && !css_general_characteristics.cib)
657 return -EINVAL;
817 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 658 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
818 if (!scpd_area) 659 if (!scpd_area)
819 return -ENOMEM; 660 return -ENOMEM;
@@ -821,8 +662,13 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
821 scpd_area->request.length = 0x0010; 662 scpd_area->request.length = 0x0010;
822 scpd_area->request.code = 0x0002; 663 scpd_area->request.code = 0x0002;
823 664
665 scpd_area->cssid = chpid.cssid;
824 scpd_area->first_chpid = chpid.id; 666 scpd_area->first_chpid = chpid.id;
825 scpd_area->last_chpid = chpid.id; 667 scpd_area->last_chpid = chpid.id;
668 scpd_area->m = m;
669 scpd_area->c = c;
670 scpd_area->fmt = fmt;
671 scpd_area->rfmt = rfmt;
826 672
827 ccode = chsc(scpd_area); 673 ccode = chsc(scpd_area);
828 if (ccode > 0) { 674 if (ccode > 0) {
@@ -833,8 +679,7 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
833 ret = chsc_error_from_response(scpd_area->response.code); 679 ret = chsc_error_from_response(scpd_area->response.code);
834 if (ret == 0) 680 if (ret == 0)
835 /* Success. */ 681 /* Success. */
836 memcpy(desc, &scpd_area->desc, 682 memcpy(resp, &scpd_area->response, scpd_area->response.length);
837 sizeof(struct channel_path_desc));
838 else 683 else
839 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", 684 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
840 scpd_area->response.code); 685 scpd_area->response.code);
@@ -842,6 +687,25 @@ out:
842 free_page((unsigned long)scpd_area); 687 free_page((unsigned long)scpd_area);
843 return ret; 688 return ret;
844} 689}
690EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
691
692int chsc_determine_base_channel_path_desc(struct chp_id chpid,
693 struct channel_path_desc *desc)
694{
695 struct chsc_response_struct *chsc_resp;
696 int ret;
697
698 chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
699 if (!chsc_resp)
700 return -ENOMEM;
701 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
702 if (ret)
703 goto out_free;
704 memcpy(desc, &chsc_resp->data, chsc_resp->length);
705out_free:
706 kfree(chsc_resp);
707 return ret;
708}
845 709
846static void 710static void
847chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, 711chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
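
The last two hunks split the descriptor query into a generic chsc_determine_channel_path_desc(), which fills a caller-supplied response block, and a thin chsc_determine_base_channel_path_desc() wrapper that allocates such a block, asks for the default (format-0) descriptor and copies the payload out. The sketch below mirrors only that wrapper pattern; the structures and the pretend firmware query are stand-ins, not the CHSC interface.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct response {                       /* stand-in for chsc_response_struct */
        unsigned short length;          /* length of the payload below */
        unsigned char data[64];
};

struct path_desc {                      /* stand-in for channel_path_desc */
        unsigned char flags;
        unsigned char lsn;
        unsigned char desc;
        unsigned char chpid;
};

/* Pretend firmware query: report a 4-byte format-0 descriptor. */
static int query_path_desc(int chpid, struct response *resp)
{
        struct path_desc d = { .flags = 0x80, .chpid = (unsigned char)chpid };

        resp->length = sizeof(d);
        memcpy(resp->data, &d, sizeof(d));
        return 0;
}

/* The wrapper: allocate a response block, query, copy the payload out. */
static int query_base_path_desc(int chpid, struct path_desc *desc)
{
        struct response *resp = calloc(1, sizeof(*resp));
        int ret;

        if (!resp)
                return -ENOMEM;
        ret = query_path_desc(chpid, resp);
        if (!ret)
                memcpy(desc, resp->data, resp->length);
        free(resp);
        return ret;
}

int main(void)
{
        struct path_desc desc;

        if (!query_base_path_desc(0x45, &desc))
                printf("chpid %02x flags %02x\n", desc.chpid, desc.flags);
        return 0;
}
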
@@ -937,15 +801,23 @@ out:
937 801
938int __init chsc_alloc_sei_area(void) 802int __init chsc_alloc_sei_area(void)
939{ 803{
804 int ret;
805
940 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 806 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
941 if (!sei_page) 807 if (!sei_page) {
942 CIO_MSG_EVENT(0, "Can't allocate page for processing of " 808 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
943 "chsc machine checks!\n"); 809 "chsc machine checks!\n");
944 return (sei_page ? 0 : -ENOMEM); 810 return -ENOMEM;
811 }
812 ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
813 if (ret)
814 kfree(sei_page);
815 return ret;
945} 816}
946 817
947void __init chsc_free_sei_area(void) 818void __init chsc_free_sei_area(void)
948{ 819{
820 s390_unregister_crw_handler(CRW_RSC_CSS);
949 kfree(sei_page); 821 kfree(sei_page);
950} 822}
951 823
@@ -1043,3 +915,52 @@ exit:
1043 915
1044EXPORT_SYMBOL_GPL(css_general_characteristics); 916EXPORT_SYMBOL_GPL(css_general_characteristics);
1045EXPORT_SYMBOL_GPL(css_chsc_characteristics); 917EXPORT_SYMBOL_GPL(css_chsc_characteristics);
918
919int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
920{
921 struct {
922 struct chsc_header request;
923 unsigned int rsvd0;
924 unsigned int op : 8;
925 unsigned int rsvd1 : 8;
926 unsigned int ctrl : 16;
927 unsigned int rsvd2[5];
928 struct chsc_header response;
929 unsigned int rsvd3[7];
930 } __attribute__ ((packed)) *rr;
931 int rc;
932
933 memset(page, 0, PAGE_SIZE);
934 rr = page;
935 rr->request.length = 0x0020;
936 rr->request.code = 0x0033;
937 rr->op = op;
938 rr->ctrl = ctrl;
939 rc = chsc(rr);
940 if (rc)
941 return -EIO;
942 rc = (rr->response.code == 0x0001) ? 0 : -EIO;
943 return rc;
944}
945
946int chsc_sstpi(void *page, void *result, size_t size)
947{
948 struct {
949 struct chsc_header request;
950 unsigned int rsvd0[3];
951 struct chsc_header response;
952 char data[size];
953 } __attribute__ ((packed)) *rr;
954 int rc;
955
956 memset(page, 0, PAGE_SIZE);
957 rr = page;
958 rr->request.length = 0x0010;
959 rr->request.code = 0x0038;
960 rc = chsc(rr);
961 if (rc)
962 return -EIO;
963 memcpy(result, &rr->data, size);
964 return (rr->response.code == 0x0001) ? 0 : -EIO;
965}
966
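
The STP helpers chsc_sstpc() and chsc_sstpi() added at the end of the file follow the usual CHSC calling convention: a zeroed, page-sized block that starts with a request header (length and command code), carries command-specific parameters, and finishes by checking the response header for code 0x0001. The stand-alone fragment below only illustrates that shape; the bit-field layout is indicative (endianness ignored) and the chsc instruction is faked.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>

struct chsc_header {
        uint16_t length;
        uint16_t code;
} __attribute__((packed));

struct sstpc_block {                    /* indicative layout only */
        struct chsc_header request;
        uint32_t rsvd0;
        uint32_t op : 8;
        uint32_t rsvd1 : 8;
        uint32_t ctrl : 16;
        uint32_t rsvd2[5];
        struct chsc_header response;
        uint32_t rsvd3[7];
} __attribute__((packed));

/* Stand-in for the chsc instruction: accept the request, report success. */
static int fake_chsc(struct sstpc_block *b)
{
        b->response.code = 0x0001;
        return 0;
}

static int sstpc(struct sstpc_block *b, unsigned int op, uint16_t ctrl)
{
        memset(b, 0, sizeof(*b));
        b->request.length = 0x0020;     /* request header: length + command code */
        b->request.code = 0x0033;
        b->op = op;
        b->ctrl = ctrl;
        if (fake_chsc(b))
                return -EIO;
        return b->response.code == 0x0001 ? 0 : -EIO;
}

int main(void)
{
        struct sstpc_block blk;

        printf("sstpc rc=%d\n", sstpc(&blk, 2, 0));
        return 0;
}
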
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index d1f5db1e69b9..fb6c4d6c45b4 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -4,7 +4,8 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/device.h> 5#include <linux/device.h>
6#include <asm/chpid.h> 6#include <asm/chpid.h>
7#include "schid.h" 7#include <asm/chsc.h>
8#include <asm/schid.h>
8 9
9#define CHSC_SDA_OC_MSS 0x2 10#define CHSC_SDA_OC_MSS 0x2
10 11
@@ -36,14 +37,15 @@ struct channel_path_desc {
36 37
37struct channel_path; 38struct channel_path;
38 39
39extern void chsc_process_crw(void);
40
41struct css_general_char { 40struct css_general_char {
42 u64 : 41; 41 u64 : 12;
42 u32 dynio : 1; /* bit 12 */
43 u32 : 28;
43 u32 aif : 1; /* bit 41 */ 44 u32 aif : 1; /* bit 41 */
44 u32 : 3; 45 u32 : 3;
45 u32 mcss : 1; /* bit 45 */ 46 u32 mcss : 1; /* bit 45 */
46 u32 : 2; 47 u32 fcs : 1; /* bit 46 */
48 u32 : 1;
47 u32 ext_mb : 1; /* bit 48 */ 49 u32 ext_mb : 1; /* bit 48 */
48 u32 : 7; 50 u32 : 7;
49 u32 aif_tdd : 1; /* bit 56 */ 51 u32 aif_tdd : 1; /* bit 56 */
@@ -51,7 +53,11 @@ struct css_general_char {
51 u32 qebsm : 1; /* bit 58 */ 53 u32 qebsm : 1; /* bit 58 */
52 u32 : 8; 54 u32 : 8;
53 u32 aif_osa : 1; /* bit 67 */ 55 u32 aif_osa : 1; /* bit 67 */
54 u32 : 28; 56 u32 : 14;
57 u32 cib : 1; /* bit 82 */
58 u32 : 5;
59 u32 fcx : 1; /* bit 88 */
60 u32 : 7;
55}__attribute__((packed)); 61}__attribute__((packed));
56 62
57struct css_chsc_char { 63struct css_chsc_char {
@@ -78,7 +84,6 @@ struct chsc_ssd_info {
78extern int chsc_get_ssd_info(struct subchannel_id schid, 84extern int chsc_get_ssd_info(struct subchannel_id schid,
79 struct chsc_ssd_info *ssd); 85 struct chsc_ssd_info *ssd);
80extern int chsc_determine_css_characteristics(void); 86extern int chsc_determine_css_characteristics(void);
81extern int css_characteristics_avail;
82extern int chsc_alloc_sei_area(void); 87extern int chsc_alloc_sei_area(void);
83extern void chsc_free_sei_area(void); 88extern void chsc_free_sei_area(void);
84 89
@@ -87,8 +92,11 @@ struct channel_subsystem;
87extern int chsc_secm(struct channel_subsystem *, int); 92extern int chsc_secm(struct channel_subsystem *, int);
88 93
89int chsc_chp_vary(struct chp_id chpid, int on); 94int chsc_chp_vary(struct chp_id chpid, int on);
90int chsc_determine_channel_path_description(struct chp_id chpid, 95int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
91 struct channel_path_desc *desc); 96 int c, int m,
97 struct chsc_response_struct *resp);
98int chsc_determine_base_channel_path_desc(struct chp_id chpid,
99 struct channel_path_desc *desc);
92void chsc_chp_online(struct chp_id chpid); 100void chsc_chp_online(struct chp_id chpid);
93void chsc_chp_offline(struct chp_id chpid); 101void chsc_chp_offline(struct chp_id chpid);
94int chsc_get_channel_measurement_chars(struct channel_path *chp); 102int chsc_get_channel_measurement_chars(struct channel_path *chp);
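
The reworked css_general_char above is a big-endian bit-field view of the general-characteristics block, with the absolute bit numbers given in the comments (dynio = 12, fcs = 46, cib = 82, fcx = 88). As a cross-check of that numbering, the same bits can be tested layout-independently from the raw byte array, as in this small sketch:

#include <stdio.h>
#include <stdint.h>

/* Bit 0 is the most significant bit of byte 0, as in the structure above. */
static int css_char_test_bit(const uint8_t *chars, unsigned int bit)
{
        return (chars[bit / 8] >> (7 - bit % 8)) & 1;
}

int main(void)
{
        uint8_t chars[16] = { 0 };

        chars[5] |= 0x02;       /* set bit 46 (fcs): byte 5, second-lowest bit */
        printf("fcs (bit 46): %d\n", css_char_test_bit(chars, 46));
        printf("cib (bit 82): %d\n", css_char_test_bit(chars, 82));
        return 0;
}
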
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 000000000000..91ca87aa9f97
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,820 @@
1/*
2 * Driver for s390 chsc subchannels
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 *
7 */
8
9#include <linux/device.h>
10#include <linux/module.h>
11#include <linux/uaccess.h>
12#include <linux/miscdevice.h>
13
14#include <asm/cio.h>
15#include <asm/chsc.h>
16#include <asm/isc.h>
17
18#include "cio.h"
19#include "cio_debug.h"
20#include "css.h"
21#include "chsc_sch.h"
22#include "ioasm.h"
23
24static debug_info_t *chsc_debug_msg_id;
25static debug_info_t *chsc_debug_log_id;
26
27#define CHSC_MSG(imp, args...) do { \
28 debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \
29 } while (0)
30
31#define CHSC_LOG(imp, txt) do { \
32 debug_text_event(chsc_debug_log_id, imp , txt); \
33 } while (0)
34
35static void CHSC_LOG_HEX(int level, void *data, int length)
36{
37 while (length > 0) {
38 debug_event(chsc_debug_log_id, level, data, length);
39 length -= chsc_debug_log_id->buf_size;
40 data += chsc_debug_log_id->buf_size;
41 }
42}
43
44MODULE_AUTHOR("IBM Corporation");
45MODULE_DESCRIPTION("driver for s390 chsc subchannels");
46MODULE_LICENSE("GPL");
47
48static void chsc_subchannel_irq(struct subchannel *sch)
49{
50 struct chsc_private *private = sch->private;
51 struct chsc_request *request = private->request;
52 struct irb *irb = (struct irb *)__LC_IRB;
53
54 CHSC_LOG(4, "irb");
55 CHSC_LOG_HEX(4, irb, sizeof(*irb));
56 /* Copy irb to provided request and set done. */
57 if (!request) {
58 CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
59 sch->schid.ssid, sch->schid.sch_no);
60 return;
61 }
62 private->request = NULL;
63 memcpy(&request->irb, irb, sizeof(*irb));
64 stsch(sch->schid, &sch->schib);
65 complete(&request->completion);
66 put_device(&sch->dev);
67}
68
69static int chsc_subchannel_probe(struct subchannel *sch)
70{
71 struct chsc_private *private;
72 int ret;
73
74 CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
75 sch->schid.ssid, sch->schid.sch_no);
76 sch->isc = CHSC_SCH_ISC;
77 private = kzalloc(sizeof(*private), GFP_KERNEL);
78 if (!private)
79 return -ENOMEM;
80 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
81 if (ret) {
82 CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
83 sch->schid.ssid, sch->schid.sch_no, ret);
84 kfree(private);
85 } else {
86 sch->private = private;
87 if (sch->dev.uevent_suppress) {
88 sch->dev.uevent_suppress = 0;
89 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
90 }
91 }
92 return ret;
93}
94
95static int chsc_subchannel_remove(struct subchannel *sch)
96{
97 struct chsc_private *private;
98
99 cio_disable_subchannel(sch);
100 private = sch->private;
101 sch->private = NULL;
102 if (private->request) {
103 complete(&private->request->completion);
104 put_device(&sch->dev);
105 }
106 kfree(private);
107 return 0;
108}
109
110static void chsc_subchannel_shutdown(struct subchannel *sch)
111{
112 cio_disable_subchannel(sch);
113}
114
115static struct css_device_id chsc_subchannel_ids[] = {
116 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
117 { /* end of list */ },
118};
119MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
120
121static struct css_driver chsc_subchannel_driver = {
122 .owner = THIS_MODULE,
123 .subchannel_type = chsc_subchannel_ids,
124 .irq = chsc_subchannel_irq,
125 .probe = chsc_subchannel_probe,
126 .remove = chsc_subchannel_remove,
127 .shutdown = chsc_subchannel_shutdown,
128 .name = "chsc_subchannel",
129};
130
131static int __init chsc_init_dbfs(void)
132{
133 chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
134 16 * sizeof(long));
135 if (!chsc_debug_msg_id)
136 goto out;
137 debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
138 debug_set_level(chsc_debug_msg_id, 2);
139 chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
140 if (!chsc_debug_log_id)
141 goto out;
142 debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
143 debug_set_level(chsc_debug_log_id, 2);
144 return 0;
145out:
146 if (chsc_debug_msg_id)
147 debug_unregister(chsc_debug_msg_id);
148 return -ENOMEM;
149}
150
151static void chsc_remove_dbfs(void)
152{
153 debug_unregister(chsc_debug_log_id);
154 debug_unregister(chsc_debug_msg_id);
155}
156
157static int __init chsc_init_sch_driver(void)
158{
159 return css_driver_register(&chsc_subchannel_driver);
160}
161
162static void chsc_cleanup_sch_driver(void)
163{
164 css_driver_unregister(&chsc_subchannel_driver);
165}
166
167static DEFINE_SPINLOCK(chsc_lock);
168
169static int chsc_subchannel_match_next_free(struct device *dev, void *data)
170{
171 struct subchannel *sch = to_subchannel(dev);
172
173 return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
174}
175
176static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
177{
178 struct device *dev;
179
180 dev = driver_find_device(&chsc_subchannel_driver.drv,
181 sch ? &sch->dev : NULL, NULL,
182 chsc_subchannel_match_next_free);
183 return dev ? to_subchannel(dev) : NULL;
184}
185
186/**
187 * chsc_async() - try to start a chsc request asynchronously
188 * @chsc_area: request to be started
189 * @request: request structure to associate
190 *
191 * Tries to start a chsc request on one of the existing chsc subchannels.
192 * Returns:
193 * %0 if the request was performed synchronously
194 * %-EINPROGRESS if the request was successfully started
195 * %-EBUSY if all chsc subchannels are busy
196 * %-ENODEV if no chsc subchannels are available
197 * Context:
198 * interrupts disabled, chsc_lock held
199 */
200static int chsc_async(struct chsc_async_area *chsc_area,
201 struct chsc_request *request)
202{
203 int cc;
204 struct chsc_private *private;
205 struct subchannel *sch = NULL;
206 int ret = -ENODEV;
207 char dbf[10];
208
209 chsc_area->header.key = PAGE_DEFAULT_KEY;
210 while ((sch = chsc_get_next_subchannel(sch))) {
211 spin_lock(sch->lock);
212 private = sch->private;
213 if (private->request) {
214 spin_unlock(sch->lock);
215 ret = -EBUSY;
216 continue;
217 }
218 chsc_area->header.sid = sch->schid;
219 CHSC_LOG(2, "schid");
220 CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
221 cc = chsc(chsc_area);
222 sprintf(dbf, "cc:%d", cc);
223 CHSC_LOG(2, dbf);
224 switch (cc) {
225 case 0:
226 ret = 0;
227 break;
228 case 1:
229 sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
230 ret = -EINPROGRESS;
231 private->request = request;
232 break;
233 case 2:
234 ret = -EBUSY;
235 break;
236 default:
237 ret = -ENODEV;
238 }
239 spin_unlock(sch->lock);
240 CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
241 sch->schid.ssid, sch->schid.sch_no, cc);
242 if (ret == -EINPROGRESS)
243 return -EINPROGRESS;
244 put_device(&sch->dev);
245 if (ret == 0)
246 return 0;
247 }
248 return ret;
249}
250
251static void chsc_log_command(struct chsc_async_area *chsc_area)
252{
253 char dbf[10];
254
255 sprintf(dbf, "CHSC:%x", chsc_area->header.code);
256 CHSC_LOG(0, dbf);
257 CHSC_LOG_HEX(0, chsc_area, 32);
258}
259
260static int chsc_examine_irb(struct chsc_request *request)
261{
262 int backed_up;
263
264 if (!scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND)
265 return -EIO;
266 backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
267 request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
268 if (scsw_cstat(&request->irb.scsw) == 0)
269 return 0;
270 if (!backed_up)
271 return 0;
272 if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
273 return -EIO;
274 if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
275 return -EPERM;
276 if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
277 return -EAGAIN;
278 if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
279 return -EAGAIN;
280 return -EIO;
281}
282
283static int chsc_ioctl_start(void __user *user_area)
284{
285 struct chsc_request *request;
286 struct chsc_async_area *chsc_area;
287 int ret;
288 char dbf[10];
289
290 if (!css_general_characteristics.dynio)
291 /* It makes no sense to try. */
292 return -EOPNOTSUPP;
293 chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
294 if (!chsc_area)
295 return -ENOMEM;
296 request = kzalloc(sizeof(*request), GFP_KERNEL);
297 if (!request) {
298 ret = -ENOMEM;
299 goto out_free;
300 }
301 init_completion(&request->completion);
302 if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
303 ret = -EFAULT;
304 goto out_free;
305 }
306 chsc_log_command(chsc_area);
307 spin_lock_irq(&chsc_lock);
308 ret = chsc_async(chsc_area, request);
309 spin_unlock_irq(&chsc_lock);
310 if (ret == -EINPROGRESS) {
311 wait_for_completion(&request->completion);
312 ret = chsc_examine_irb(request);
313 }
314 /* copy area back to user */
315 if (!ret)
316 if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
317 ret = -EFAULT;
318out_free:
319 sprintf(dbf, "ret:%d", ret);
320 CHSC_LOG(0, dbf);
321 kfree(request);
322 free_page((unsigned long)chsc_area);
323 return ret;
324}
325
326static int chsc_ioctl_info_channel_path(void __user *user_cd)
327{
328 struct chsc_chp_cd *cd;
329 int ret, ccode;
330 struct {
331 struct chsc_header request;
332 u32 : 2;
333 u32 m : 1;
334 u32 : 1;
335 u32 fmt1 : 4;
336 u32 cssid : 8;
337 u32 : 8;
338 u32 first_chpid : 8;
339 u32 : 24;
340 u32 last_chpid : 8;
341 u32 : 32;
342 struct chsc_header response;
343 u8 data[PAGE_SIZE - 20];
344 } __attribute__ ((packed)) *scpcd_area;
345
346 scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
347 if (!scpcd_area)
348 return -ENOMEM;
349 cd = kzalloc(sizeof(*cd), GFP_KERNEL);
350 if (!cd) {
351 ret = -ENOMEM;
352 goto out_free;
353 }
354 if (copy_from_user(cd, user_cd, sizeof(*cd))) {
355 ret = -EFAULT;
356 goto out_free;
357 }
358 scpcd_area->request.length = 0x0010;
359 scpcd_area->request.code = 0x0028;
360 scpcd_area->m = cd->m;
361 scpcd_area->fmt1 = cd->fmt;
362 scpcd_area->cssid = cd->chpid.cssid;
363 scpcd_area->first_chpid = cd->chpid.id;
364 scpcd_area->last_chpid = cd->chpid.id;
365
366 ccode = chsc(scpcd_area);
367 if (ccode != 0) {
368 ret = -EIO;
369 goto out_free;
370 }
371 if (scpcd_area->response.code != 0x0001) {
372 ret = -EIO;
373 CHSC_MSG(0, "scpcd: response code=%x\n",
374 scpcd_area->response.code);
375 goto out_free;
376 }
377 memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
378 if (copy_to_user(user_cd, cd, sizeof(*cd)))
379 ret = -EFAULT;
380 else
381 ret = 0;
382out_free:
383 kfree(cd);
384 free_page((unsigned long)scpcd_area);
385 return ret;
386}
387
388static int chsc_ioctl_info_cu(void __user *user_cd)
389{
390 struct chsc_cu_cd *cd;
391 int ret, ccode;
392 struct {
393 struct chsc_header request;
394 u32 : 2;
395 u32 m : 1;
396 u32 : 1;
397 u32 fmt1 : 4;
398 u32 cssid : 8;
399 u32 : 8;
400 u32 first_cun : 8;
401 u32 : 24;
402 u32 last_cun : 8;
403 u32 : 32;
404 struct chsc_header response;
405 u8 data[PAGE_SIZE - 20];
406 } __attribute__ ((packed)) *scucd_area;
407
408 scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
409 if (!scucd_area)
410 return -ENOMEM;
411 cd = kzalloc(sizeof(*cd), GFP_KERNEL);
412 if (!cd) {
413 ret = -ENOMEM;
414 goto out_free;
415 }
416 if (copy_from_user(cd, user_cd, sizeof(*cd))) {
417 ret = -EFAULT;
418 goto out_free;
419 }
420 scucd_area->request.length = 0x0010;
421 scucd_area->request.code = 0x0028;
422 scucd_area->m = cd->m;
423 scucd_area->fmt1 = cd->fmt;
424 scucd_area->cssid = cd->cssid;
425 scucd_area->first_cun = cd->cun;
426 scucd_area->last_cun = cd->cun;
427
428 ccode = chsc(scucd_area);
429 if (ccode != 0) {
430 ret = -EIO;
431 goto out_free;
432 }
433 if (scucd_area->response.code != 0x0001) {
434 ret = -EIO;
435 CHSC_MSG(0, "scucd: response code=%x\n",
436 scucd_area->response.code);
437 goto out_free;
438 }
439 memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
440 if (copy_to_user(user_cd, cd, sizeof(*cd)))
441 ret = -EFAULT;
442 else
443 ret = 0;
444out_free:
445 kfree(cd);
446 free_page((unsigned long)scucd_area);
447 return ret;
448}
449
450static int chsc_ioctl_info_sch_cu(void __user *user_cud)
451{
452 struct chsc_sch_cud *cud;
453 int ret, ccode;
454 struct {
455 struct chsc_header request;
456 u32 : 2;
457 u32 m : 1;
458 u32 : 5;
459 u32 fmt1 : 4;
460 u32 : 2;
461 u32 ssid : 2;
462 u32 first_sch : 16;
463 u32 : 8;
464 u32 cssid : 8;
465 u32 last_sch : 16;
466 u32 : 32;
467 struct chsc_header response;
468 u8 data[PAGE_SIZE - 20];
469 } __attribute__ ((packed)) *sscud_area;
470
471 sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
472 if (!sscud_area)
473 return -ENOMEM;
474 cud = kzalloc(sizeof(*cud), GFP_KERNEL);
475 if (!cud) {
476 ret = -ENOMEM;
477 goto out_free;
478 }
479 if (copy_from_user(cud, user_cud, sizeof(*cud))) {
480 ret = -EFAULT;
481 goto out_free;
482 }
483 sscud_area->request.length = 0x0010;
484 sscud_area->request.code = 0x0006;
485 sscud_area->m = cud->schid.m;
486 sscud_area->fmt1 = cud->fmt;
487 sscud_area->ssid = cud->schid.ssid;
488 sscud_area->first_sch = cud->schid.sch_no;
489 sscud_area->cssid = cud->schid.cssid;
490 sscud_area->last_sch = cud->schid.sch_no;
491
492 ccode = chsc(sscud_area);
493 if (ccode != 0) {
494 ret = -EIO;
495 goto out_free;
496 }
497 if (sscud_area->response.code != 0x0001) {
498 ret = -EIO;
499 CHSC_MSG(0, "sscud: response code=%x\n",
500 sscud_area->response.code);
501 goto out_free;
502 }
503 memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
504 if (copy_to_user(user_cud, cud, sizeof(*cud)))
505 ret = -EFAULT;
506 else
507 ret = 0;
508out_free:
509 kfree(cud);
510 free_page((unsigned long)sscud_area);
511 return ret;
512}
513
514static int chsc_ioctl_conf_info(void __user *user_ci)
515{
516 struct chsc_conf_info *ci;
517 int ret, ccode;
518 struct {
519 struct chsc_header request;
520 u32 : 2;
521 u32 m : 1;
522 u32 : 1;
523 u32 fmt1 : 4;
524 u32 cssid : 8;
525 u32 : 6;
526 u32 ssid : 2;
527 u32 : 8;
528 u64 : 64;
529 struct chsc_header response;
530 u8 data[PAGE_SIZE - 20];
531 } __attribute__ ((packed)) *sci_area;
532
533 sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
534 if (!sci_area)
535 return -ENOMEM;
536 ci = kzalloc(sizeof(*ci), GFP_KERNEL);
537 if (!ci) {
538 ret = -ENOMEM;
539 goto out_free;
540 }
541 if (copy_from_user(ci, user_ci, sizeof(*ci))) {
542 ret = -EFAULT;
543 goto out_free;
544 }
545 sci_area->request.length = 0x0010;
546 sci_area->request.code = 0x0012;
547 sci_area->m = ci->id.m;
548 sci_area->fmt1 = ci->fmt;
549 sci_area->cssid = ci->id.cssid;
550 sci_area->ssid = ci->id.ssid;
551
552 ccode = chsc(sci_area);
553 if (ccode != 0) {
554 ret = -EIO;
555 goto out_free;
556 }
557 if (sci_area->response.code != 0x0001) {
558 ret = -EIO;
559 CHSC_MSG(0, "sci: response code=%x\n",
560 sci_area->response.code);
561 goto out_free;
562 }
563 memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
564 if (copy_to_user(user_ci, ci, sizeof(*ci)))
565 ret = -EFAULT;
566 else
567 ret = 0;
568out_free:
569 kfree(ci);
570 free_page((unsigned long)sci_area);
571 return ret;
572}
573
574static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
575{
576 struct chsc_comp_list *ccl;
577 int ret, ccode;
578 struct {
579 struct chsc_header request;
580 u32 ctype : 8;
581 u32 : 4;
582 u32 fmt : 4;
583 u32 : 16;
584 u64 : 64;
585 u32 list_parm[2];
586 u64 : 64;
587 struct chsc_header response;
588 u8 data[PAGE_SIZE - 36];
589 } __attribute__ ((packed)) *sccl_area;
590 struct {
591 u32 m : 1;
592 u32 : 31;
593 u32 cssid : 8;
594 u32 : 16;
595 u32 chpid : 8;
596 } __attribute__ ((packed)) *chpid_parm;
597 struct {
598 u32 f_cssid : 8;
599 u32 l_cssid : 8;
600 u32 : 16;
601 u32 res;
602 } __attribute__ ((packed)) *cssids_parm;
603
604 sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
605 if (!sccl_area)
606 return -ENOMEM;
607 ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
608 if (!ccl) {
609 ret = -ENOMEM;
610 goto out_free;
611 }
612 if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
613 ret = -EFAULT;
614 goto out_free;
615 }
616 sccl_area->request.length = 0x0020;
617 sccl_area->request.code = 0x0030;
618 sccl_area->fmt = ccl->req.fmt;
619 sccl_area->ctype = ccl->req.ctype;
620 switch (sccl_area->ctype) {
621 case CCL_CU_ON_CHP:
622 case CCL_IOP_CHP:
623 chpid_parm = (void *)&sccl_area->list_parm;
624 chpid_parm->m = ccl->req.chpid.m;
625 chpid_parm->cssid = ccl->req.chpid.chp.cssid;
626 chpid_parm->chpid = ccl->req.chpid.chp.id;
627 break;
628 case CCL_CSS_IMG:
629 case CCL_CSS_IMG_CONF_CHAR:
630 cssids_parm = (void *)&sccl_area->list_parm;
631 cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
632 cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
633 break;
634 }
635 ccode = chsc(sccl_area);
636 if (ccode != 0) {
637 ret = -EIO;
638 goto out_free;
639 }
640 if (sccl_area->response.code != 0x0001) {
641 ret = -EIO;
642 CHSC_MSG(0, "sccl: response code=%x\n",
643 sccl_area->response.code);
644 goto out_free;
645 }
646 memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
647 if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
648 ret = -EFAULT;
649 else
650 ret = 0;
651out_free:
652 kfree(ccl);
653 free_page((unsigned long)sccl_area);
654 return ret;
655}
656
657static int chsc_ioctl_chpd(void __user *user_chpd)
658{
659 struct chsc_cpd_info *chpd;
660 int ret;
661
662 chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
663 if (!chpd)
664 return -ENOMEM;
665 if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
666 ret = -EFAULT;
667 goto out_free;
668 }
669 ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
670 chpd->rfmt, chpd->c, chpd->m,
671 &chpd->chpdb);
672 if (ret)
673 goto out_free;
674 if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
675 ret = -EFAULT;
676out_free:
677 kfree(chpd);
678 return ret;
679}
680
681static int chsc_ioctl_dcal(void __user *user_dcal)
682{
683 struct chsc_dcal *dcal;
684 int ret, ccode;
685 struct {
686 struct chsc_header request;
687 u32 atype : 8;
688 u32 : 4;
689 u32 fmt : 4;
690 u32 : 16;
691 u32 res0[2];
692 u32 list_parm[2];
693 u32 res1[2];
694 struct chsc_header response;
695 u8 data[PAGE_SIZE - 36];
696 } __attribute__ ((packed)) *sdcal_area;
697
698 sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
699 if (!sdcal_area)
700 return -ENOMEM;
701 dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
702 if (!dcal) {
703 ret = -ENOMEM;
704 goto out_free;
705 }
706 if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
707 ret = -EFAULT;
708 goto out_free;
709 }
710 sdcal_area->request.length = 0x0020;
711 sdcal_area->request.code = 0x0034;
712 sdcal_area->atype = dcal->req.atype;
713 sdcal_area->fmt = dcal->req.fmt;
714 memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
715 sizeof(sdcal_area->list_parm));
716
717 ccode = chsc(sdcal_area);
718 if (ccode != 0) {
719 ret = -EIO;
720 goto out_free;
721 }
722 if (sdcal_area->response.code != 0x0001) {
723 ret = -EIO;
724 CHSC_MSG(0, "sdcal: response code=%x\n",
725 sdcal_area->response.code);
726 goto out_free;
727 }
728 memcpy(&dcal->sdcal, &sdcal_area->response,
729 sdcal_area->response.length);
730 if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
731 ret = -EFAULT;
732 else
733 ret = 0;
734out_free:
735 kfree(dcal);
736 free_page((unsigned long)sdcal_area);
737 return ret;
738}
739
740static long chsc_ioctl(struct file *filp, unsigned int cmd,
741 unsigned long arg)
742{
743 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
744 switch (cmd) {
745 case CHSC_START:
746 return chsc_ioctl_start((void __user *)arg);
747 case CHSC_INFO_CHANNEL_PATH:
748 return chsc_ioctl_info_channel_path((void __user *)arg);
749 case CHSC_INFO_CU:
750 return chsc_ioctl_info_cu((void __user *)arg);
751 case CHSC_INFO_SCH_CU:
752 return chsc_ioctl_info_sch_cu((void __user *)arg);
753 case CHSC_INFO_CI:
754 return chsc_ioctl_conf_info((void __user *)arg);
755 case CHSC_INFO_CCL:
756 return chsc_ioctl_conf_comp_list((void __user *)arg);
757 case CHSC_INFO_CPD:
758 return chsc_ioctl_chpd((void __user *)arg);
759 case CHSC_INFO_DCAL:
760 return chsc_ioctl_dcal((void __user *)arg);
761 default: /* unknown ioctl number */
762 return -ENOIOCTLCMD;
763 }
764}
765
766static const struct file_operations chsc_fops = {
767 .owner = THIS_MODULE,
768 .unlocked_ioctl = chsc_ioctl,
769 .compat_ioctl = chsc_ioctl,
770};
771
772static struct miscdevice chsc_misc_device = {
773 .minor = MISC_DYNAMIC_MINOR,
774 .name = "chsc",
775 .fops = &chsc_fops,
776};
777
778static int __init chsc_misc_init(void)
779{
780 return misc_register(&chsc_misc_device);
781}
782
783static void chsc_misc_cleanup(void)
784{
785 misc_deregister(&chsc_misc_device);
786}
787
788static int __init chsc_sch_init(void)
789{
790 int ret;
791
792 ret = chsc_init_dbfs();
793 if (ret)
794 return ret;
795 isc_register(CHSC_SCH_ISC);
796 ret = chsc_init_sch_driver();
797 if (ret)
798 goto out_dbf;
799 ret = chsc_misc_init();
800 if (ret)
801 goto out_driver;
802 return ret;
803out_driver:
804 chsc_cleanup_sch_driver();
805out_dbf:
806 isc_unregister(CHSC_SCH_ISC);
807 chsc_remove_dbfs();
808 return ret;
809}
810
811static void __exit chsc_sch_exit(void)
812{
813 chsc_misc_cleanup();
814 chsc_cleanup_sch_driver();
815 isc_unregister(CHSC_SCH_ISC);
816 chsc_remove_dbfs();
817}
818
819module_init(chsc_sch_init);
820module_exit(chsc_sch_exit);
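
chsc_async() in the new driver maps the condition code of the chsc instruction onto the return values its kerneldoc promises: cc 0 means the request completed synchronously, cc 1 means it was started and will complete with an interrupt, cc 2 means the subchannel is busy, and anything else means it is not operational. The mapping in isolation:

#include <stdio.h>
#include <errno.h>

static int cc_to_errno(int cc)
{
        switch (cc) {
        case 0:
                return 0;               /* performed synchronously */
        case 1:
                return -EINPROGRESS;    /* started; wait for the interrupt */
        case 2:
                return -EBUSY;          /* this subchannel is busy, try the next */
        default:
                return -ENODEV;         /* subchannel not operational */
        }
}

int main(void)
{
        int cc;

        for (cc = 0; cc <= 3; cc++)
                printf("cc=%d -> %d\n", cc, cc_to_errno(cc));
        return 0;
}
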
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 000000000000..589ebfad6aad
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,13 @@
1#ifndef _CHSC_SCH_H
2#define _CHSC_SCH_H
3
4struct chsc_request {
5 struct completion completion;
6 struct irb irb;
7};
8
9struct chsc_private {
10 struct chsc_request *request;
11};
12
13#endif
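
chsc_sch.h ties each in-flight request to a completion and the IRB it will eventually receive; chsc_ioctl_start() submits the request and sleeps on that completion only when submission returned -EINPROGRESS, then judges the outcome from the IRB. The single-threaded caricature below mirrors that control flow; the "interrupt" is just a direct function call and the types are placeholders.

#include <stdio.h>
#include <errno.h>

struct request {
        int done;               /* stands in for struct completion */
        int irb_ok;             /* stands in for the examined IRB */
};

/* Pretend submission: even ids complete synchronously, odd ids go async. */
static int submit(int id)
{
        return (id % 2 == 0) ? 0 : -EINPROGRESS;
}

/* Pretend interrupt handler: deliver the "IRB" and mark the request done. */
static void fake_irq(struct request *req)
{
        req->irb_ok = 1;
        req->done = 1;
}

static int start(int id)
{
        struct request req = { 0, 0 };
        int ret = submit(id);

        if (ret == -EINPROGRESS) {
                fake_irq(&req);                             /* wait_for_completion() */
                ret = (req.done && req.irb_ok) ? 0 : -EIO;  /* chsc_examine_irb() */
        }
        return ret;
}

int main(void)
{
        printf("sync request:  rc=%d\n", start(2));
        printf("async request: rc=%d\n", start(3));
        return 0;
}
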
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b32d7eb3d81a..33bff8fec7d1 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -2,7 +2,7 @@
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2006 5 * Copyright IBM Corp. 1999,2008
6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -24,7 +24,9 @@
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h> 25#include <asm/chpid.h>
26#include <asm/airq.h> 26#include <asm/airq.h>
27#include <asm/isc.h>
27#include <asm/cpu.h> 28#include <asm/cpu.h>
29#include <asm/fcx.h>
28#include "cio.h" 30#include "cio.h"
29#include "css.h" 31#include "css.h"
30#include "chsc.h" 32#include "chsc.h"
@@ -72,7 +74,6 @@ out_unregister:
72 debug_unregister(cio_debug_trace_id); 74 debug_unregister(cio_debug_trace_id);
73 if (cio_debug_crw_id) 75 if (cio_debug_crw_id)
74 debug_unregister(cio_debug_crw_id); 76 debug_unregister(cio_debug_crw_id);
75 printk(KERN_WARNING"cio: could not initialize debugging\n");
76 return -1; 77 return -1;
77} 78}
78 79
@@ -128,7 +129,7 @@ cio_tpi(void)
128 local_bh_disable(); 129 local_bh_disable();
129 irq_enter (); 130 irq_enter ();
130 spin_lock(sch->lock); 131 spin_lock(sch->lock);
131 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); 132 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
132 if (sch->driver && sch->driver->irq) 133 if (sch->driver && sch->driver->irq)
133 sch->driver->irq(sch); 134 sch->driver->irq(sch);
134 spin_unlock(sch->lock); 135 spin_unlock(sch->lock);
@@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
167{ 168{
168 char dbf_txt[15]; 169 char dbf_txt[15];
169 int ccode; 170 int ccode;
170 struct orb *orb; 171 union orb *orb;
171 172
172 CIO_TRACE_EVENT(4, "stIO"); 173 CIO_TRACE_EVENT(4, "stIO");
173 CIO_TRACE_EVENT(4, sch->dev.bus_id); 174 CIO_TRACE_EVENT(4, sch->dev.bus_id);
174 175
175 orb = &to_io_private(sch)->orb; 176 orb = &to_io_private(sch)->orb;
176 /* sch is always under 2G. */ 177 /* sch is always under 2G. */
177 orb->intparm = (u32)(addr_t)sch; 178 orb->cmd.intparm = (u32)(addr_t)sch;
178 orb->fmt = 1; 179 orb->cmd.fmt = 1;
179 180
180 orb->pfch = sch->options.prefetch == 0; 181 orb->cmd.pfch = sch->options.prefetch == 0;
181 orb->spnd = sch->options.suspend; 182 orb->cmd.spnd = sch->options.suspend;
182 orb->ssic = sch->options.suspend && sch->options.inter; 183 orb->cmd.ssic = sch->options.suspend && sch->options.inter;
183 orb->lpm = (lpm != 0) ? lpm : sch->lpm; 184 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
184#ifdef CONFIG_64BIT 185#ifdef CONFIG_64BIT
185 /* 186 /*
186 * for 64 bit we always support 64 bit IDAWs with 4k page size only 187 * for 64 bit we always support 64 bit IDAWs with 4k page size only
187 */ 188 */
188 orb->c64 = 1; 189 orb->cmd.c64 = 1;
189 orb->i2k = 0; 190 orb->cmd.i2k = 0;
190#endif 191#endif
191 orb->key = key >> 4; 192 orb->cmd.key = key >> 4;
192 /* issue "Start Subchannel" */ 193 /* issue "Start Subchannel" */
193 orb->cpa = (__u32) __pa(cpa); 194 orb->cmd.cpa = (__u32) __pa(cpa);
194 ccode = ssch(sch->schid, orb); 195 ccode = ssch(sch->schid, orb);
195 196
196 /* process condition code */ 197 /* process condition code */
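
The cio_start_key() hunk above is mostly mechanical: struct orb becomes union orb and every field is reached through the command-mode view (orb->cmd.*), presumably to make room for the transport-mode ORB that the new <asm/fcx.h> include hints at. A toy model of that union-with-views idea follows; the field sets are abbreviated and deliberately not the architected layout.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct cmd_orb {                        /* command-mode view (abridged) */
        uint32_t intparm;
        uint32_t fmt : 1;
        uint32_t pfch : 1;
        uint32_t c64 : 1;
        uint32_t key : 4;
        uint32_t : 25;
        uint32_t cpa;                   /* channel-program address */
};

struct tm_orb {                         /* transport-mode view (abridged) */
        uint32_t intparm;
        uint32_t key : 4;
        uint32_t : 28;
        uint32_t tcw;                   /* transport-command-word address */
};

union orb {
        struct cmd_orb cmd;
        struct tm_orb tm;
};

int main(void)
{
        union orb orb;

        memset(&orb, 0, sizeof(orb));
        orb.cmd.intparm = 0x12345678;   /* as in the patch: the subchannel address */
        orb.cmd.fmt = 1;
        orb.cmd.key = 6;
        orb.cmd.cpa = 0x2000;
        printf("cmd orb: intparm=%08x key=%u cpa=%08x\n",
               (unsigned)orb.cmd.intparm, (unsigned)orb.cmd.key,
               (unsigned)orb.cmd.cpa);
        return 0;
}
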
@@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
202 /* 203 /*
203 * initialize device status information 204 * initialize device status information
204 */ 205 */
205 sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; 206 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
206 return 0; 207 return 0;
207 case 1: /* status pending */ 208 case 1: /* status pending */
208 case 2: /* busy */ 209 case 2: /* busy */
@@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch)
237 238
238 switch (ccode) { 239 switch (ccode) {
239 case 0: 240 case 0:
240 sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; 241 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
241 return 0; 242 return 0;
242 case 1: 243 case 1:
243 return -EBUSY; 244 return -EBUSY;
@@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch)
277 278
278 switch (ccode) { 279 switch (ccode) {
279 case 0: 280 case 0:
280 sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; 281 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
281 return 0; 282 return 0;
282 case 1: /* status pending */ 283 case 1: /* status pending */
283 case 2: /* busy */ 284 case 2: /* busy */
@@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch)
312 313
313 switch (ccode) { 314 switch (ccode) {
314 case 0: 315 case 0:
315 sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; 316 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
316 return 0; 317 return 0;
317 default: /* device not operational */ 318 default: /* device not operational */
318 return -ENODEV; 319 return -ENODEV;
@@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch)
387 return ret; 388 return ret;
388} 389}
389 390
390/* 391/**
391 * Enable subchannel. 392 * cio_enable_subchannel - enable a subchannel.
393 * @sch: subchannel to be enabled
394 * @intparm: interruption parameter to set
392 */ 395 */
393int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 396int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
394{ 397{
@@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
434 CIO_TRACE_EVENT (2, dbf_txt); 437 CIO_TRACE_EVENT (2, dbf_txt);
435 return ret; 438 return ret;
436} 439}
440EXPORT_SYMBOL_GPL(cio_enable_subchannel);
437 441
438/* 442/**
439 * Disable subchannel. 443 * cio_disable_subchannel - disable a subchannel.
444 * @sch: subchannel to disable
440 */ 445 */
441int 446int cio_disable_subchannel(struct subchannel *sch)
442cio_disable_subchannel (struct subchannel *sch)
443{ 447{
444 char dbf_txt[15]; 448 char dbf_txt[15];
445 int ccode; 449 int ccode;
@@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch)
455 if (ccode == 3) /* Not operational. */ 459 if (ccode == 3) /* Not operational. */
456 return -ENODEV; 460 return -ENODEV;
457 461
458 if (sch->schib.scsw.actl != 0) 462 if (scsw_actl(&sch->schib.scsw) != 0)
459 /* 463 /*
460 * the disable function must not be called while there are 464 * the disable function must not be called while there are
461 * requests pending for completion ! 465 * requests pending for completion !
@@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch)
484 CIO_TRACE_EVENT (2, dbf_txt); 488 CIO_TRACE_EVENT (2, dbf_txt);
485 return ret; 489 return ret;
486} 490}
491EXPORT_SYMBOL_GPL(cio_disable_subchannel);
487 492
488int cio_create_sch_lock(struct subchannel *sch) 493int cio_create_sch_lock(struct subchannel *sch)
489{ 494{
@@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch)
494 return 0; 499 return 0;
495} 500}
496 501
497/* 502static int cio_check_devno_blacklisted(struct subchannel *sch)
498 * cio_validate_subchannel() 503{
504 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
505 /*
506 * This device must not be known to Linux. So we simply
507 * say that there is no device and return ENODEV.
508 */
509 CIO_MSG_EVENT(6, "Blacklisted device detected "
510 "at devno %04X, subchannel set %x\n",
511 sch->schib.pmcw.dev, sch->schid.ssid);
512 return -ENODEV;
513 }
514 return 0;
515}
516
517static int cio_validate_io_subchannel(struct subchannel *sch)
518{
519 /* Initialization for io subchannels. */
520 if (!css_sch_is_valid(&sch->schib))
521 return -ENODEV;
522
523 /* Devno is valid. */
524 return cio_check_devno_blacklisted(sch);
525}
526
527static int cio_validate_msg_subchannel(struct subchannel *sch)
528{
529 /* Initialization for message subchannels. */
530 if (!css_sch_is_valid(&sch->schib))
531 return -ENODEV;
532
533 /* Devno is valid. */
534 return cio_check_devno_blacklisted(sch);
535}
536
537/**
538 * cio_validate_subchannel - basic validation of subchannel
539 * @sch: subchannel structure to be filled out
540 * @schid: subchannel id
499 * 541 *
500 * Find out subchannel type and initialize struct subchannel. 542 * Find out subchannel type and initialize struct subchannel.
501 * Return codes: 543 * Return codes:
502 * SUBCHANNEL_TYPE_IO for a normal io subchannel 544 * 0 on success
503 * SUBCHANNEL_TYPE_CHSC for a chsc subchannel
504 * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
505 * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
506 * -ENXIO for non-defined subchannels 545 * -ENXIO for non-defined subchannels
507 * -ENODEV for subchannels with invalid device number or blacklisted devices 546 * -ENODEV for invalid subchannels or blacklisted devices
547 * -EIO for subchannels in an invalid subchannel set
508 */ 548 */
509int 549int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
510cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
511{ 550{
512 char dbf_txt[15]; 551 char dbf_txt[15];
513 int ccode; 552 int ccode;
514 int err; 553 int err;
515 554
516 sprintf (dbf_txt, "valsch%x", schid.sch_no); 555 sprintf(dbf_txt, "valsch%x", schid.sch_no);
517 CIO_TRACE_EVENT (4, dbf_txt); 556 CIO_TRACE_EVENT(4, dbf_txt);
518 557
519 /* Nuke all fields. */ 558 /* Nuke all fields. */
520 memset(sch, 0, sizeof(struct subchannel)); 559 memset(sch, 0, sizeof(struct subchannel));
@@ -546,67 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
546 /* Copy subchannel type from path management control word. */ 585 /* Copy subchannel type from path management control word. */
547 sch->st = sch->schib.pmcw.st; 586 sch->st = sch->schib.pmcw.st;
548 587
549 /* 588 switch (sch->st) {
550 * ... just being curious we check for non I/O subchannels 589 case SUBCHANNEL_TYPE_IO:
551 */ 590 err = cio_validate_io_subchannel(sch);
552 if (sch->st != 0) { 591 break;
553 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports " 592 case SUBCHANNEL_TYPE_MSG:
554 "non-I/O subchannel type %04X\n", 593 err = cio_validate_msg_subchannel(sch);
555 sch->schid.ssid, sch->schid.sch_no, sch->st); 594 break;
556 /* We stop here for non-io subchannels. */ 595 default:
557 err = sch->st; 596 err = 0;
558 goto out;
559 }
560
561 /* Initialization for io subchannels. */
562 if (!css_sch_is_valid(&sch->schib)) {
563 err = -ENODEV;
564 goto out;
565 } 597 }
566 598 if (err)
567 /* Devno is valid. */
568 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
569 /*
570 * This device must not be known to Linux. So we simply
571 * say that there is no device and return ENODEV.
572 */
573 CIO_MSG_EVENT(6, "Blacklisted device detected "
574 "at devno %04X, subchannel set %x\n",
575 sch->schib.pmcw.dev, sch->schid.ssid);
576 err = -ENODEV;
577 goto out; 599 goto out;
578 }
579 if (cio_is_console(sch->schid)) {
580 sch->opm = 0xff;
581 sch->isc = 1;
582 } else {
583 sch->opm = chp_get_sch_opm(sch);
584 sch->isc = 3;
585 }
586 sch->lpm = sch->schib.pmcw.pam & sch->opm;
587
588 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
589 "- PIM = %02X, PAM = %02X, POM = %02X\n",
590 sch->schib.pmcw.dev, sch->schid.ssid,
591 sch->schid.sch_no, sch->schib.pmcw.pim,
592 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
593 600
594 /* 601 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
595 * We now have to initially ... 602 sch->schid.ssid, sch->schid.sch_no, sch->st);
596 * ... enable "concurrent sense"
597 * ... enable "multipath mode" if more than one
598 * CHPID is available. This is done regardless
599 * whether multiple paths are available for us.
600 */
601 sch->schib.pmcw.csense = 1; /* concurrent sense */
602 sch->schib.pmcw.ena = 0;
603 if ((sch->lpm & (sch->lpm - 1)) != 0)
604 sch->schib.pmcw.mp = 1; /* multipath mode */
605 /* clean up possible residual cmf stuff */
606 sch->schib.pmcw.mme = 0;
607 sch->schib.pmcw.mbfc = 0;
608 sch->schib.pmcw.mbi = 0;
609 sch->schib.mba = 0;
610 return 0; 603 return 0;
611out: 604out:
612 if (!cio_is_console(schid)) 605 if (!cio_is_console(schid))
@@ -647,7 +640,7 @@ do_IRQ (struct pt_regs *regs)
647 */ 640 */
648 if (tpi_info->adapter_IO == 1 && 641 if (tpi_info->adapter_IO == 1 &&
649 tpi_info->int_type == IO_INTERRUPT_TYPE) { 642 tpi_info->int_type == IO_INTERRUPT_TYPE) {
650 do_adapter_IO(); 643 do_adapter_IO(tpi_info->isc);
651 continue; 644 continue;
652 } 645 }
653 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 646 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -706,9 +699,9 @@ void wait_cons_dev(void)
706 if (!console_subchannel_in_use) 699 if (!console_subchannel_in_use)
707 return; 700 return;
708 701
709 /* disable all but isc 1 (console device) */ 702 /* disable all but the console isc */
710 __ctl_store (save_cr6, 6, 6); 703 __ctl_store (save_cr6, 6, 6);
711 cr6 = 0x40000000; 704 cr6 = 1UL << (31 - CONSOLE_ISC);
712 __ctl_load (cr6, 6, 6); 705 __ctl_load (cr6, 6, 6);
713 706
714 do { 707 do {
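Note: control register 6 holds one enablement bit per I/O interruption subclass, with ISC 0 in the most significant bit. A short sketch of the mask computation introduced above, assuming CONSOLE_ISC is 1, which reproduces the previously hard-coded 0x40000000:

	/* Sketch: ISC n maps to bit (31 - n) of CR6 when counting bit 0 as the LSB. */
	#define CONSOLE_ISC 1	/* assumed value, matching the old hard-coded mask */

	static unsigned long console_cr6_mask(void)
	{
		return 1UL << (31 - CONSOLE_ISC);	/* 1 << 30 == 0x40000000 */
	}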
@@ -716,7 +709,7 @@ void wait_cons_dev(void)
716 if (!cio_tpi()) 709 if (!cio_tpi())
717 cpu_relax(); 710 cpu_relax();
718 spin_lock(console_subchannel.lock); 711 spin_lock(console_subchannel.lock);
719 } while (console_subchannel.schib.scsw.actl != 0); 712 } while (console_subchannel.schib.scsw.cmd.actl != 0);
720 /* 713 /*
721 * restore previous isc value 714 * restore previous isc value
722 */ 715 */
@@ -761,7 +754,6 @@ cio_get_console_sch_no(void)
761 /* unlike in 2.4, we cannot autoprobe here, since 754 /* unlike in 2.4, we cannot autoprobe here, since
762 * the channel subsystem is not fully initialized. 755 * the channel subsystem is not fully initialized.
763 * With some luck, the HWC console can take over */ 756 * With some luck, the HWC console can take over */
764 printk(KERN_WARNING "cio: No ccw console found!\n");
765 return -1; 757 return -1;
766 } 758 }
767 return console_irq; 759 return console_irq;
@@ -778,6 +770,7 @@ cio_probe_console(void)
778 sch_no = cio_get_console_sch_no(); 770 sch_no = cio_get_console_sch_no();
779 if (sch_no == -1) { 771 if (sch_no == -1) {
780 console_subchannel_in_use = 0; 772 console_subchannel_in_use = 0;
773 printk(KERN_WARNING "cio: No ccw console found!\n");
781 return ERR_PTR(-ENODEV); 774 return ERR_PTR(-ENODEV);
782 } 775 }
783 memset(&console_subchannel, 0, sizeof(struct subchannel)); 776 memset(&console_subchannel, 0, sizeof(struct subchannel));
@@ -790,15 +783,15 @@ cio_probe_console(void)
790 } 783 }
791 784
792 /* 785 /*
793 * enable console I/O-interrupt subclass 1 786 * enable console I/O-interrupt subclass
794 */ 787 */
795 ctl_set_bit(6, 30); 788 isc_register(CONSOLE_ISC);
796 console_subchannel.isc = 1; 789 console_subchannel.schib.pmcw.isc = CONSOLE_ISC;
797 console_subchannel.schib.pmcw.isc = 1;
798 console_subchannel.schib.pmcw.intparm = 790 console_subchannel.schib.pmcw.intparm =
799 (u32)(addr_t)&console_subchannel; 791 (u32)(addr_t)&console_subchannel;
800 ret = cio_modify(&console_subchannel); 792 ret = cio_modify(&console_subchannel);
801 if (ret) { 793 if (ret) {
794 isc_unregister(CONSOLE_ISC);
802 console_subchannel_in_use = 0; 795 console_subchannel_in_use = 0;
803 return ERR_PTR(ret); 796 return ERR_PTR(ret);
804 } 797 }
@@ -810,7 +803,7 @@ cio_release_console(void)
810{ 803{
811 console_subchannel.schib.pmcw.intparm = 0; 804 console_subchannel.schib.pmcw.intparm = 0;
812 cio_modify(&console_subchannel); 805 cio_modify(&console_subchannel);
813 ctl_clear_bit(6, 24); 806 isc_unregister(CONSOLE_ISC);
814 console_subchannel_in_use = 0; 807 console_subchannel_in_use = 0;
815} 808}
816 809
@@ -864,7 +857,7 @@ static void udelay_reset(unsigned long usecs)
864} 857}
865 858
866static int 859static int
867__clear_subchannel_easy(struct subchannel_id schid) 860__clear_io_subchannel_easy(struct subchannel_id schid)
868{ 861{
869 int retry; 862 int retry;
870 863
@@ -883,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
883 return -EBUSY; 876 return -EBUSY;
884} 877}
885 878
879static void __clear_chsc_subchannel_easy(void)
880{
881 /* It seems we can only wait for a bit here :/ */
882 udelay_reset(100);
883}
884
886static int pgm_check_occured; 885static int pgm_check_occured;
887 886
888static void cio_reset_pgm_check_handler(void) 887static void cio_reset_pgm_check_handler(void)
@@ -921,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
921 case -ENODEV: 920 case -ENODEV:
922 break; 921 break;
923 default: /* -EBUSY */ 922 default: /* -EBUSY */
924 if (__clear_subchannel_easy(schid)) 923 switch (schib.pmcw.st) {
925 break; /* give up... */ 924 case SUBCHANNEL_TYPE_IO:
925 if (__clear_io_subchannel_easy(schid))
926 goto out; /* give up... */
927 break;
928 case SUBCHANNEL_TYPE_CHSC:
929 __clear_chsc_subchannel_easy();
930 break;
931 default:
932 /* No default clear strategy */
933 break;
934 }
926 stsch(schid, &schib); 935 stsch(schid, &schib);
927 __disable_subchannel_easy(schid, &schib); 936 __disable_subchannel_easy(schid, &schib);
928 } 937 }
938out:
929 return 0; 939 return 0;
930} 940}
931 941
@@ -1068,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1068 iplinfo->is_qdio = schib.pmcw.qf; 1078 iplinfo->is_qdio = schib.pmcw.qf;
1069 return 0; 1079 return 0;
1070} 1080}
1081
1082/**
1083 * cio_tm_start_key - perform start function
1084 * @sch: subchannel on which to perform the start function
1085 * @tcw: transport-command word to be started
1086 * @lpm: mask of paths to use
1087 * @key: storage key to use for storage access
1088 *
1089 * Start the tcw on the given subchannel. Return zero on success, non-zero
1090 * otherwise.
1091 */
1092int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
1093{
1094 int cc;
1095 union orb *orb = &to_io_private(sch)->orb;
1096
1097 memset(orb, 0, sizeof(union orb));
1098 orb->tm.intparm = (u32) (addr_t) sch;
1099 orb->tm.key = key >> 4;
1100 orb->tm.b = 1;
1101 orb->tm.lpm = lpm ? lpm : sch->lpm;
1102 orb->tm.tcw = (u32) (addr_t) tcw;
1103 cc = ssch(sch->schid, orb);
1104 switch (cc) {
1105 case 0:
1106 return 0;
1107 case 1:
1108 case 2:
1109 return -EBUSY;
1110 default:
1111 return cio_start_handle_notoper(sch, lpm);
1112 }
1113}
1114
1115/**
1116 * cio_tm_intrg - perform interrogate function
 1117 * @sch: subchannel on which to perform the interrogate function
1118 *
1119 * If the specified subchannel is running in transport-mode, perform the
 1120 * interrogate function. Return zero on success, non-zero otherwise.
1121 */
1122int cio_tm_intrg(struct subchannel *sch)
1123{
1124 int cc;
1125
1126 if (!to_io_private(sch)->orb.tm.b)
1127 return -EINVAL;
1128 cc = xsch(sch->schid);
1129 switch (cc) {
1130 case 0:
1131 case 2:
1132 return 0;
1133 case 1:
1134 return -EBUSY;
1135 default:
1136 return -ENODEV;
1137 }
1138}
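Note: a minimal, hypothetical caller of the new transport-mode helper might look as follows; the subchannel and tcw arguments are placeholders, and the real users of this interface sit outside this diff:

	/* Hypothetical caller - names and error handling are placeholders. */
	static int example_start_tcw(struct subchannel *sch, struct tcw *tcw)
	{
		/* lpm == 0 means "use sch->lpm"; key 0 selects the default storage key. */
		return cio_tm_start_key(sch, tcw, 0, 0);
	}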
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 6e933aebe013..3b236d20e835 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -3,9 +3,12 @@
3 3
4#include <linux/mutex.h> 4#include <linux/mutex.h>
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/mod_devicetable.h>
6#include <asm/chpid.h> 7#include <asm/chpid.h>
8#include <asm/cio.h>
9#include <asm/fcx.h>
10#include <asm/schid.h>
7#include "chsc.h" 11#include "chsc.h"
8#include "schid.h"
9 12
10/* 13/*
11 * path management control word 14 * path management control word
@@ -13,7 +16,7 @@
13struct pmcw { 16struct pmcw {
14 u32 intparm; /* interruption parameter */ 17 u32 intparm; /* interruption parameter */
15 u32 qf : 1; /* qdio facility */ 18 u32 qf : 1; /* qdio facility */
16 u32 res0 : 1; /* reserved zeros */ 19 u32 w : 1;
 17 u32 isc : 3; /* interruption subclass */ 20
18 u32 res5 : 3; /* reserved zeros */ 21 u32 res5 : 3; /* reserved zeros */
19 u32 ena : 1; /* enabled */ 22 u32 ena : 1; /* enabled */
@@ -47,7 +50,7 @@ struct pmcw {
47 */ 50 */
48struct schib { 51struct schib {
49 struct pmcw pmcw; /* path management control word */ 52 struct pmcw pmcw; /* path management control word */
50 struct scsw scsw; /* subchannel status word */ 53 union scsw scsw; /* subchannel status word */
51 __u64 mba; /* measurement block address */ 54 __u64 mba; /* measurement block address */
52 __u8 mda[4]; /* model dependent area */ 55 __u8 mda[4]; /* model dependent area */
53} __attribute__ ((packed,aligned(4))); 56} __attribute__ ((packed,aligned(4)));
@@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int);
99extern int cio_get_options (struct subchannel *); 102extern int cio_get_options (struct subchannel *);
100extern int cio_modify (struct subchannel *); 103extern int cio_modify (struct subchannel *);
101 104
105int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
106int cio_tm_intrg(struct subchannel *sch);
107
102int cio_create_sch_lock(struct subchannel *); 108int cio_create_sch_lock(struct subchannel *);
103void do_adapter_IO(void); 109void do_adapter_IO(u8 isc);
104void do_IRQ(struct pt_regs *); 110void do_IRQ(struct pt_regs *);
105 111
106/* Use with care. */ 112/* Use with care. */
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 2808b6833b9e..a90b28c0be57 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev)
341 if (stsch(sch->schid, &sch->schib)) 341 if (stsch(sch->schid, &sch->schib))
342 return -ENODEV; 342 return -ENODEV;
343 343
344 if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { 344 if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
345 /* Don't copy if a start function is in progress. */ 345 /* Don't copy if a start function is in progress. */
346 if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) && 346 if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
347 (sch->schib.scsw.actl & 347 (scsw_actl(&sch->schib.scsw) &
348 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && 348 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
349 (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))) 349 (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
350 return -EBUSY; 350 return -EBUSY;
351 } 351 }
352 cmb_data = cdev->private->cmb; 352 cmb_data = cdev->private->cmb;
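Note: with schib.scsw turned into a union (see the cio.h hunk above), fields such as fctl, actl and stctl live in either the command-mode or the transport-mode variant, so common code now goes through scsw_fctl()/scsw_actl()/scsw_stctl() accessors instead of dereferencing scsw.cmd or scsw.tm directly. A rough sketch of what such an accessor does; the real helpers come from the arch headers and the layout assumption here is illustrative only:

	/* Illustrative only - assumes both scsw variants carry actl at the same spot. */
	static inline u32 example_scsw_actl(union scsw *scsw)
	{
		/* An accessor hides which union member is currently valid. */
		return scsw->cmd.actl;
	}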
@@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev)
612 free_pages((unsigned long)mem, get_order(size)); 612 free_pages((unsigned long)mem, get_order(size));
613 } else if (!mem) { 613 } else if (!mem) {
614 /* no luck */ 614 /* no luck */
615 printk(KERN_WARNING "cio: failed to allocate area "
616 "for measuring %d subchannels\n",
617 cmb_area.num_channels);
618 ret = -ENOMEM; 615 ret = -ENOMEM;
619 goto out; 616 goto out;
620 } else { 617 } else {
@@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev,
1230 switch (val) { 1227 switch (val) {
1231 case 0: 1228 case 0:
1232 ret = disable_cmf(cdev); 1229 ret = disable_cmf(cdev);
1233 if (ret)
1234 dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
1235 break; 1230 break;
1236 case 1: 1231 case 1:
1237 ret = enable_cmf(cdev); 1232 ret = enable_cmf(cdev);
1238 if (ret && ret != -EBUSY)
1239 dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
1240 break; 1233 break;
1241 } 1234 }
1242 1235
@@ -1344,8 +1337,7 @@ static int __init init_cmf(void)
1344 * to basic mode. 1337 * to basic mode.
1345 */ 1338 */
1346 if (format == CMF_AUTODETECT) { 1339 if (format == CMF_AUTODETECT) {
1347 if (!css_characteristics_avail || 1340 if (!css_general_characteristics.ext_mb) {
1348 !css_general_characteristics.ext_mb) {
1349 format = CMF_BASIC; 1341 format = CMF_BASIC;
1350 } else { 1342 } else {
1351 format = CMF_EXTENDED; 1343 format = CMF_EXTENDED;
@@ -1365,8 +1357,6 @@ static int __init init_cmf(void)
1365 cmbops = &cmbops_extended; 1357 cmbops = &cmbops_extended;
1366 break; 1358 break;
1367 default: 1359 default:
1368 printk(KERN_ERR "cio: Invalid format %d for channel "
1369 "measurement facility\n", format);
1370 return 1; 1360 return 1;
1371 } 1361 }
1372 1362
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a76956512b2d..46c021d880dc 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/css.c 2 * drivers/s390/cio/css.c
3 * driver for channel subsystem 3 * driver for channel subsystem
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 */ 8 */
@@ -14,7 +13,9 @@
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/list.h> 14#include <linux/list.h>
16#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <asm/isc.h>
17 17
18#include "../s390mach.h"
18#include "css.h" 19#include "css.h"
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -30,8 +31,6 @@ static int max_ssid = 0;
30 31
31struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; 32struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
32 33
33int css_characteristics_avail = 0;
34
35int 34int
36for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 35for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
37{ 36{
@@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid)
121 kfree(sch); 120 kfree(sch);
122 return ERR_PTR(ret); 121 return ERR_PTR(ret);
123 } 122 }
124
125 if (sch->st != SUBCHANNEL_TYPE_IO) {
126 /* For now we ignore all non-io subchannels. */
127 kfree(sch);
128 return ERR_PTR(-EINVAL);
129 }
130
131 /*
132 * Set intparm to subchannel address.
133 * This is fine even on 64bit since the subchannel is always located
134 * under 2G.
135 */
136 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
137 ret = cio_modify(sch);
138 if (ret) {
139 kfree(sch->lock);
140 kfree(sch);
141 return ERR_PTR(ret);
142 }
143 return sch; 123 return sch;
144} 124}
145 125
@@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch)
177 return ret; 157 return ret;
178} 158}
179 159
160/**
161 * css_sch_device_unregister - unregister a subchannel
162 * @sch: subchannel to be unregistered
163 */
180void css_sch_device_unregister(struct subchannel *sch) 164void css_sch_device_unregister(struct subchannel *sch)
181{ 165{
182 mutex_lock(&sch->reg_mutex); 166 mutex_lock(&sch->reg_mutex);
183 device_unregister(&sch->dev); 167 if (device_is_registered(&sch->dev))
168 device_unregister(&sch->dev);
184 mutex_unlock(&sch->reg_mutex); 169 mutex_unlock(&sch->reg_mutex);
185} 170}
171EXPORT_SYMBOL_GPL(css_sch_device_unregister);
186 172
187static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 173static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
188{ 174{
@@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch)
229 } 215 }
230} 216}
231 217
218static ssize_t type_show(struct device *dev, struct device_attribute *attr,
219 char *buf)
220{
221 struct subchannel *sch = to_subchannel(dev);
222
223 return sprintf(buf, "%01x\n", sch->st);
224}
225
226static DEVICE_ATTR(type, 0444, type_show, NULL);
227
228static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
229 char *buf)
230{
231 struct subchannel *sch = to_subchannel(dev);
232
233 return sprintf(buf, "css:t%01X\n", sch->st);
234}
235
236static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
237
238static struct attribute *subch_attrs[] = {
239 &dev_attr_type.attr,
240 &dev_attr_modalias.attr,
241 NULL,
242};
243
244static struct attribute_group subch_attr_group = {
245 .attrs = subch_attrs,
246};
247
248static struct attribute_group *default_subch_attr_groups[] = {
249 &subch_attr_group,
250 NULL,
251};
252
232static int css_register_subchannel(struct subchannel *sch) 253static int css_register_subchannel(struct subchannel *sch)
233{ 254{
234 int ret; 255 int ret;
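Note: the new attributes expose the subchannel type as a single hex digit, both directly and as a modalias of the form "css:t<type>". A small sketch of the resulting strings for a hypothetical type-0 (I/O) subchannel:

	/* Sketch: attribute contents for a hypothetical subchannel with sch->st == 0. */
	static void example_subch_attr_values(struct subchannel *sch)
	{
		char buf[16];

		sprintf(buf, "%01x\n", sch->st);	/* .../type     reads "0"      */
		sprintf(buf, "css:t%01X\n", sch->st);	/* .../modalias reads "css:t0" */
	}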
@@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch)
237 sch->dev.parent = &channel_subsystems[0]->device; 258 sch->dev.parent = &channel_subsystems[0]->device;
238 sch->dev.bus = &css_bus_type; 259 sch->dev.bus = &css_bus_type;
239 sch->dev.release = &css_subchannel_release; 260 sch->dev.release = &css_subchannel_release;
240 sch->dev.groups = subch_attr_groups; 261 sch->dev.groups = default_subch_attr_groups;
241 /* 262 /*
242 * We don't want to generate uevents for I/O subchannels that don't 263 * We don't want to generate uevents for I/O subchannels that don't
243 * have a working ccw device behind them since they will be 264 * have a working ccw device behind them since they will be
244 * unregistered before they can be used anyway, so we delay the add 265 * unregistered before they can be used anyway, so we delay the add
245 * uevent until after device recognition was successful. 266 * uevent until after device recognition was successful.
267 * Note that we suppress the uevent for all subchannel types;
268 * the subchannel driver can decide itself when it wants to inform
269 * userspace of its existence.
246 */ 270 */
247 if (!cio_is_console(sch->schid)) 271 sch->dev.uevent_suppress = 1;
248 /* Console is special, no need to suppress. */
249 sch->dev.uevent_suppress = 1;
250 css_update_ssd_info(sch); 272 css_update_ssd_info(sch);
251 /* make it known to the system */ 273 /* make it known to the system */
252 ret = css_sch_device_register(sch); 274 ret = css_sch_device_register(sch);
@@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch)
255 sch->schid.ssid, sch->schid.sch_no, ret); 277 sch->schid.ssid, sch->schid.sch_no, ret);
256 return ret; 278 return ret;
257 } 279 }
280 if (!sch->driver) {
281 /*
282 * No driver matched. Generate the uevent now so that
283 * a fitting driver module may be loaded based on the
284 * modalias.
285 */
286 sch->dev.uevent_suppress = 0;
287 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
288 }
258 return ret; 289 return ret;
259} 290}
260 291
261static int css_probe_device(struct subchannel_id schid) 292int css_probe_device(struct subchannel_id schid)
262{ 293{
263 int ret; 294 int ret;
264 struct subchannel *sch; 295 struct subchannel *sch;
@@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib)
301{ 332{
302 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) 333 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
303 return 0; 334 return 0;
335 if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
336 return 0;
304 return 1; 337 return 1;
305} 338}
306EXPORT_SYMBOL_GPL(css_sch_is_valid); 339EXPORT_SYMBOL_GPL(css_sch_is_valid);
307 340
308static int css_get_subchannel_status(struct subchannel *sch)
309{
310 struct schib schib;
311
312 if (stsch(sch->schid, &schib))
313 return CIO_GONE;
314 if (!css_sch_is_valid(&schib))
315 return CIO_GONE;
316 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
317 return CIO_REVALIDATE;
318 if (!sch->lpm)
319 return CIO_NO_PATH;
320 return CIO_OPER;
321}
322
323static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
324{
325 int event, ret, disc;
326 unsigned long flags;
327 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
328
329 spin_lock_irqsave(sch->lock, flags);
330 disc = device_is_disconnected(sch);
331 if (disc && slow) {
332 /* Disconnected devices are evaluated directly only.*/
333 spin_unlock_irqrestore(sch->lock, flags);
334 return 0;
335 }
336 /* No interrupt after machine check - kill pending timers. */
337 device_kill_pending_timer(sch);
338 if (!disc && !slow) {
339 /* Non-disconnected devices are evaluated on the slow path. */
340 spin_unlock_irqrestore(sch->lock, flags);
341 return -EAGAIN;
342 }
343 event = css_get_subchannel_status(sch);
344 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
345 sch->schid.ssid, sch->schid.sch_no, event,
346 disc ? "disconnected" : "normal",
347 slow ? "slow" : "fast");
348 /* Analyze subchannel status. */
349 action = NONE;
350 switch (event) {
351 case CIO_NO_PATH:
352 if (disc) {
353 /* Check if paths have become available. */
354 action = REPROBE;
355 break;
356 }
357 /* fall through */
358 case CIO_GONE:
359 /* Prevent unwanted effects when opening lock. */
360 cio_disable_subchannel(sch);
361 device_set_disconnected(sch);
362 /* Ask driver what to do with device. */
363 action = UNREGISTER;
364 if (sch->driver && sch->driver->notify) {
365 spin_unlock_irqrestore(sch->lock, flags);
366 ret = sch->driver->notify(sch, event);
367 spin_lock_irqsave(sch->lock, flags);
368 if (ret)
369 action = NONE;
370 }
371 break;
372 case CIO_REVALIDATE:
373 /* Device will be removed, so no notify necessary. */
374 if (disc)
375 /* Reprobe because immediate unregister might block. */
376 action = REPROBE;
377 else
378 action = UNREGISTER_PROBE;
379 break;
380 case CIO_OPER:
381 if (disc)
382 /* Get device operational again. */
383 action = REPROBE;
384 break;
385 }
386 /* Perform action. */
387 ret = 0;
388 switch (action) {
389 case UNREGISTER:
390 case UNREGISTER_PROBE:
391 /* Unregister device (will use subchannel lock). */
392 spin_unlock_irqrestore(sch->lock, flags);
393 css_sch_device_unregister(sch);
394 spin_lock_irqsave(sch->lock, flags);
395
396 /* Reset intparm to zeroes. */
397 sch->schib.pmcw.intparm = 0;
398 cio_modify(sch);
399 break;
400 case REPROBE:
401 device_trigger_reprobe(sch);
402 break;
403 default:
404 break;
405 }
406 spin_unlock_irqrestore(sch->lock, flags);
407 /* Probe if necessary. */
408 if (action == UNREGISTER_PROBE)
409 ret = css_probe_device(sch->schid);
410
411 return ret;
412}
413
414static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) 341static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
415{ 342{
416 struct schib schib; 343 struct schib schib;
@@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
429 return css_probe_device(schid); 356 return css_probe_device(schid);
430} 357}
431 358
359static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
360{
361 int ret = 0;
362
363 if (sch->driver) {
364 if (sch->driver->sch_event)
365 ret = sch->driver->sch_event(sch, slow);
366 else
367 dev_dbg(&sch->dev,
368 "Got subchannel machine check but "
369 "no sch_event handler provided.\n");
370 }
371 return ret;
372}
373
432static void css_evaluate_subchannel(struct subchannel_id schid, int slow) 374static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
433{ 375{
434 struct subchannel *sch; 376 struct subchannel *sch;
@@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
596/* 538/*
597 * Called from the machine check handler for subchannel report words. 539 * Called from the machine check handler for subchannel report words.
598 */ 540 */
599void css_process_crw(int rsid1, int rsid2) 541static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
600{ 542{
601 struct subchannel_id mchk_schid; 543 struct subchannel_id mchk_schid;
602 544
603 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", 545 if (overflow) {
604 rsid1, rsid2); 546 css_schedule_eval_all();
547 return;
548 }
549 CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
550 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
551 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
552 crw0->erc, crw0->rsid);
553 if (crw1)
554 CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
555 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
556 crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
557 crw1->anc, crw1->erc, crw1->rsid);
605 init_subchannel_id(&mchk_schid); 558 init_subchannel_id(&mchk_schid);
606 mchk_schid.sch_no = rsid1; 559 mchk_schid.sch_no = crw0->rsid;
607 if (rsid2 != 0) 560 if (crw1)
608 mchk_schid.ssid = (rsid2 >> 8) & 3; 561 mchk_schid.ssid = (crw1->rsid >> 8) & 3;
609 562
610 /* 563 /*
611 * Since we are always presented with IPI in the CRW, we have to 564 * Since we are always presented with IPI in the CRW, we have to
612 * use stsch() to find out if the subchannel in question has come 565 * use stsch() to find out if the subchannel in question has come
613 * or gone. 566 * or gone.
@@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
658static void __init 611static void __init
659css_generate_pgid(struct channel_subsystem *css, u32 tod_high) 612css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
660{ 613{
661 if (css_characteristics_avail && css_general_characteristics.mcss) { 614 if (css_general_characteristics.mcss) {
662 css->global_pgid.pgid_high.ext_cssid.version = 0x80; 615 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
663 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 616 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
664 } else { 617 } else {
@@ -795,8 +748,6 @@ init_channel_subsystem (void)
795 ret = chsc_determine_css_characteristics(); 748 ret = chsc_determine_css_characteristics();
796 if (ret == -ENOMEM) 749 if (ret == -ENOMEM)
797 goto out; /* No need to continue. */ 750 goto out; /* No need to continue. */
798 if (ret == 0)
799 css_characteristics_avail = 1;
800 751
801 ret = chsc_alloc_sei_area(); 752 ret = chsc_alloc_sei_area();
802 if (ret) 753 if (ret)
@@ -806,6 +757,10 @@ init_channel_subsystem (void)
806 if (ret) 757 if (ret)
807 goto out; 758 goto out;
808 759
760 ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
761 if (ret)
762 goto out;
763
809 if ((ret = bus_register(&css_bus_type))) 764 if ((ret = bus_register(&css_bus_type)))
810 goto out; 765 goto out;
811 766
@@ -836,8 +791,7 @@ init_channel_subsystem (void)
836 ret = device_register(&css->device); 791 ret = device_register(&css->device);
837 if (ret) 792 if (ret)
838 goto out_free_all; 793 goto out_free_all;
839 if (css_characteristics_avail && 794 if (css_chsc_characteristics.secm) {
840 css_chsc_characteristics.secm) {
841 ret = device_create_file(&css->device, 795 ret = device_create_file(&css->device,
842 &dev_attr_cm_enable); 796 &dev_attr_cm_enable);
843 if (ret) 797 if (ret)
@@ -852,7 +806,8 @@ init_channel_subsystem (void)
852 goto out_pseudo; 806 goto out_pseudo;
853 css_init_done = 1; 807 css_init_done = 1;
854 808
855 ctl_set_bit(6, 28); 809 /* Enable default isc for I/O subchannels. */
810 isc_register(IO_SCH_ISC);
856 811
857 for_each_subchannel(__init_channel_subsystem, NULL); 812 for_each_subchannel(__init_channel_subsystem, NULL);
858 return 0; 813 return 0;
@@ -875,7 +830,7 @@ out_unregister:
875 i--; 830 i--;
876 css = channel_subsystems[i]; 831 css = channel_subsystems[i];
877 device_unregister(&css->pseudo_subchannel->dev); 832 device_unregister(&css->pseudo_subchannel->dev);
878 if (css_characteristics_avail && css_chsc_characteristics.secm) 833 if (css_chsc_characteristics.secm)
879 device_remove_file(&css->device, 834 device_remove_file(&css->device,
880 &dev_attr_cm_enable); 835 &dev_attr_cm_enable);
881 device_unregister(&css->device); 836 device_unregister(&css->device);
@@ -883,6 +838,7 @@ out_unregister:
883out_bus: 838out_bus:
884 bus_unregister(&css_bus_type); 839 bus_unregister(&css_bus_type);
885out: 840out:
841 s390_unregister_crw_handler(CRW_RSC_CSS);
886 chsc_free_sei_area(); 842 chsc_free_sei_area();
887 kfree(slow_subchannel_set); 843 kfree(slow_subchannel_set);
888 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", 844 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
@@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch)
895 return sch == to_css(sch->dev.parent)->pseudo_subchannel; 851 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
896} 852}
897 853
898/* 854static int css_bus_match(struct device *dev, struct device_driver *drv)
899 * find a driver for a subchannel. They identify by the subchannel
900 * type with the exception that the console subchannel driver has its own
901 * subchannel type although the device is an i/o subchannel
902 */
903static int
904css_bus_match (struct device *dev, struct device_driver *drv)
905{ 855{
906 struct subchannel *sch = to_subchannel(dev); 856 struct subchannel *sch = to_subchannel(dev);
907 struct css_driver *driver = to_cssdriver(drv); 857 struct css_driver *driver = to_cssdriver(drv);
858 struct css_device_id *id;
908 859
909 if (sch->st == driver->subchannel_type) 860 for (id = driver->subchannel_type; id->match_flags; id++) {
910 return 1; 861 if (sch->st == id->type)
862 return 1;
863 }
911 864
912 return 0; 865 return 0;
913} 866}
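Note: drivers now declare the subchannel types they handle through a table of struct css_device_id rather than a single type value, so one driver can bind to several types and the modalias emitted by the uevent callback below can be resolved to a module. A minimal sketch of such a table, mirroring the io_subchannel_ids table added in device.c further down; the match_flags value 0x1 and SUBCHANNEL_TYPE_MSG are taken from usage visible elsewhere in this diff:

	/* Sketch: a hypothetical driver matching both I/O and message subchannels. */
	static struct css_device_id example_subchannel_ids[] = {
		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_MSG, },
		{ /* end of list */ },
	};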
@@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev)
945 sch->driver->shutdown(sch); 898 sch->driver->shutdown(sch);
946} 899}
947 900
901static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
902{
903 struct subchannel *sch = to_subchannel(dev);
904 int ret;
905
906 ret = add_uevent_var(env, "ST=%01X", sch->st);
907 if (ret)
908 return ret;
909 ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
910 return ret;
911}
912
948struct bus_type css_bus_type = { 913struct bus_type css_bus_type = {
949 .name = "css", 914 .name = "css",
950 .match = css_bus_match, 915 .match = css_bus_match,
951 .probe = css_probe, 916 .probe = css_probe,
952 .remove = css_remove, 917 .remove = css_remove,
953 .shutdown = css_shutdown, 918 .shutdown = css_shutdown,
919 .uevent = css_uevent,
954}; 920};
955 921
956/** 922/**
@@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem);
985 951
986MODULE_LICENSE("GPL"); 952MODULE_LICENSE("GPL");
987EXPORT_SYMBOL(css_bus_type); 953EXPORT_SYMBOL(css_bus_type);
988EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index e1913518f354..57ebf120f825 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -9,8 +9,7 @@
9 9
10#include <asm/cio.h> 10#include <asm/cio.h>
11#include <asm/chpid.h> 11#include <asm/chpid.h>
12 12#include <asm/schid.h>
13#include "schid.h"
14 13
15/* 14/*
16 * path grouping stuff 15 * path grouping stuff
@@ -58,20 +57,28 @@ struct pgid {
58 __u32 tod_high; /* high word TOD clock */ 57 __u32 tod_high; /* high word TOD clock */
59} __attribute__ ((packed)); 58} __attribute__ ((packed));
60 59
61/*
62 * A css driver handles all subchannels of one type.
63 * Currently, we only care about I/O subchannels (type 0), these
64 * have a ccw_device connected to them.
65 */
66struct subchannel; 60struct subchannel;
61struct chp_link;
62/**
63 * struct css_driver - device driver for subchannels
64 * @owner: owning module
65 * @subchannel_type: subchannel type supported by this driver
66 * @drv: embedded device driver structure
67 * @irq: called on interrupts
68 * @chp_event: called for events affecting a channel path
69 * @sch_event: called for events affecting the subchannel
70 * @probe: function called on probe
71 * @remove: function called on remove
72 * @shutdown: called at device shutdown
73 * @name: name of the device driver
74 */
67struct css_driver { 75struct css_driver {
68 struct module *owner; 76 struct module *owner;
69 unsigned int subchannel_type; 77 struct css_device_id *subchannel_type;
70 struct device_driver drv; 78 struct device_driver drv;
71 void (*irq)(struct subchannel *); 79 void (*irq)(struct subchannel *);
72 int (*notify)(struct subchannel *, int); 80 int (*chp_event)(struct subchannel *, struct chp_link *, int);
73 void (*verify)(struct subchannel *); 81 int (*sch_event)(struct subchannel *, int);
74 void (*termination)(struct subchannel *);
75 int (*probe)(struct subchannel *); 82 int (*probe)(struct subchannel *);
76 int (*remove)(struct subchannel *); 83 int (*remove)(struct subchannel *);
77 void (*shutdown)(struct subchannel *); 84 void (*shutdown)(struct subchannel *);
@@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *);
89extern void css_driver_unregister(struct css_driver *); 96extern void css_driver_unregister(struct css_driver *);
90 97
91extern void css_sch_device_unregister(struct subchannel *); 98extern void css_sch_device_unregister(struct subchannel *);
92extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 99extern int css_probe_device(struct subchannel_id);
100extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
93extern int css_init_done; 101extern int css_init_done;
94int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), 102int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
95 int (*fn_unknown)(struct subchannel_id, 103 int (*fn_unknown)(struct subchannel_id,
96 void *), void *data); 104 void *), void *data);
97extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 105extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
98extern void css_process_crw(int, int);
99extern void css_reiterate_subchannels(void); 106extern void css_reiterate_subchannels(void);
100void css_update_ssd_info(struct subchannel *sch); 107void css_update_ssd_info(struct subchannel *sch);
101 108
@@ -121,20 +128,6 @@ struct channel_subsystem {
121extern struct bus_type css_bus_type; 128extern struct bus_type css_bus_type;
122extern struct channel_subsystem *channel_subsystems[]; 129extern struct channel_subsystem *channel_subsystems[];
123 130
124/* Some helper functions for disconnected state. */
125int device_is_disconnected(struct subchannel *);
126void device_set_disconnected(struct subchannel *);
127void device_trigger_reprobe(struct subchannel *);
128
129/* Helper functions for vary on/off. */
130int device_is_online(struct subchannel *);
131void device_kill_io(struct subchannel *);
132void device_set_intretry(struct subchannel *sch);
133int device_trigger_verify(struct subchannel *sch);
134
135/* Machine check helper function. */
136void device_kill_pending_timer(struct subchannel *);
137
138/* Helper functions to build lists for the slow path. */ 131/* Helper functions to build lists for the slow path. */
139void css_schedule_eval(struct subchannel_id schid); 132void css_schedule_eval(struct subchannel_id schid);
140void css_schedule_eval_all(void); 133void css_schedule_eval_all(void);
@@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *);
145 138
146extern struct workqueue_struct *slow_path_wq; 139extern struct workqueue_struct *slow_path_wq;
147void css_wait_for_slow_path(void); 140void css_wait_for_slow_path(void);
148
149extern struct attribute_group *subch_attr_groups[];
150#endif 141#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e22813db74a2..e818d0c54c09 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/device.c 2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices 3 * bus driver for ccw devices
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
@@ -23,7 +22,9 @@
23#include <asm/cio.h> 22#include <asm/cio.h>
24#include <asm/param.h> /* HZ */ 23#include <asm/param.h> /* HZ */
25#include <asm/cmb.h> 24#include <asm/cmb.h>
25#include <asm/isc.h>
26 26
27#include "chp.h"
27#include "cio.h" 28#include "cio.h"
28#include "cio_debug.h" 29#include "cio_debug.h"
29#include "css.h" 30#include "css.h"
@@ -125,19 +126,24 @@ struct bus_type ccw_bus_type;
125static void io_subchannel_irq(struct subchannel *); 126static void io_subchannel_irq(struct subchannel *);
126static int io_subchannel_probe(struct subchannel *); 127static int io_subchannel_probe(struct subchannel *);
127static int io_subchannel_remove(struct subchannel *); 128static int io_subchannel_remove(struct subchannel *);
128static int io_subchannel_notify(struct subchannel *, int);
129static void io_subchannel_verify(struct subchannel *);
130static void io_subchannel_ioterm(struct subchannel *);
131static void io_subchannel_shutdown(struct subchannel *); 129static void io_subchannel_shutdown(struct subchannel *);
130static int io_subchannel_sch_event(struct subchannel *, int);
131static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
132 int);
133
134static struct css_device_id io_subchannel_ids[] = {
135 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
136 { /* end of list */ },
137};
138MODULE_DEVICE_TABLE(css, io_subchannel_ids);
132 139
133static struct css_driver io_subchannel_driver = { 140static struct css_driver io_subchannel_driver = {
134 .owner = THIS_MODULE, 141 .owner = THIS_MODULE,
135 .subchannel_type = SUBCHANNEL_TYPE_IO, 142 .subchannel_type = io_subchannel_ids,
136 .name = "io_subchannel", 143 .name = "io_subchannel",
137 .irq = io_subchannel_irq, 144 .irq = io_subchannel_irq,
138 .notify = io_subchannel_notify, 145 .sch_event = io_subchannel_sch_event,
139 .verify = io_subchannel_verify, 146 .chp_event = io_subchannel_chp_event,
140 .termination = io_subchannel_ioterm,
141 .probe = io_subchannel_probe, 147 .probe = io_subchannel_probe,
142 .remove = io_subchannel_remove, 148 .remove = io_subchannel_remove,
143 .shutdown = io_subchannel_shutdown, 149 .shutdown = io_subchannel_shutdown,
@@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
487 ccw_device_set_online(cdev); 493 ccw_device_set_online(cdev);
488 return 0; 494 return 0;
489} 495}
490static void online_store_handle_online(struct ccw_device *cdev, int force) 496static int online_store_handle_online(struct ccw_device *cdev, int force)
491{ 497{
492 int ret; 498 int ret;
493 499
494 ret = online_store_recog_and_online(cdev); 500 ret = online_store_recog_and_online(cdev);
495 if (ret) 501 if (ret)
496 return; 502 return ret;
497 if (force && cdev->private->state == DEV_STATE_BOXED) { 503 if (force && cdev->private->state == DEV_STATE_BOXED) {
498 ret = ccw_device_stlck(cdev); 504 ret = ccw_device_stlck(cdev);
499 if (ret) { 505 if (ret)
500 dev_warn(&cdev->dev, 506 return ret;
501 "ccw_device_stlck returned %d!\n", ret);
502 return;
503 }
504 if (cdev->id.cu_type == 0) 507 if (cdev->id.cu_type == 0)
505 cdev->private->state = DEV_STATE_NOT_OPER; 508 cdev->private->state = DEV_STATE_NOT_OPER;
506 online_store_recog_and_online(cdev); 509 online_store_recog_and_online(cdev);
507 } 510 }
508 511 return 0;
509} 512}
510 513
511static ssize_t online_store (struct device *dev, struct device_attribute *attr, 514static ssize_t online_store (struct device *dev, struct device_attribute *attr,
@@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
538 ret = count; 541 ret = count;
539 break; 542 break;
540 case 1: 543 case 1:
541 online_store_handle_online(cdev, force); 544 ret = online_store_handle_online(cdev, force);
542 ret = count; 545 if (!ret)
546 ret = count;
543 break; 547 break;
544 default: 548 default:
545 ret = -EINVAL; 549 ret = -EINVAL;
@@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
584static DEVICE_ATTR(online, 0644, online_show, online_store); 588static DEVICE_ATTR(online, 0644, online_show, online_store);
585static DEVICE_ATTR(availability, 0444, available_show, NULL); 589static DEVICE_ATTR(availability, 0444, available_show, NULL);
586 590
587static struct attribute * subch_attrs[] = { 591static struct attribute *io_subchannel_attrs[] = {
588 &dev_attr_chpids.attr, 592 &dev_attr_chpids.attr,
589 &dev_attr_pimpampom.attr, 593 &dev_attr_pimpampom.attr,
590 NULL, 594 NULL,
591}; 595};
592 596
593static struct attribute_group subch_attr_group = { 597static struct attribute_group io_subchannel_attr_group = {
594 .attrs = subch_attrs, 598 .attrs = io_subchannel_attrs,
595};
596
597struct attribute_group *subch_attr_groups[] = {
598 &subch_attr_group,
599 NULL,
600}; 599};
601 600
602static struct attribute * ccwdev_attrs[] = { 601static struct attribute * ccwdev_attrs[] = {
@@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch,
790 sch_set_cdev(sch, cdev); 789 sch_set_cdev(sch, cdev);
791 cdev->private->schid = sch->schid; 790 cdev->private->schid = sch->schid;
792 cdev->ccwlock = sch->lock; 791 cdev->ccwlock = sch->lock;
793 device_trigger_reprobe(sch); 792 ccw_device_trigger_reprobe(cdev);
794 spin_unlock_irq(sch->lock); 793 spin_unlock_irq(sch->lock);
795} 794}
796 795
@@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1037 struct ccw_device_private *priv; 1036 struct ccw_device_private *priv;
1038 1037
1039 sch_set_cdev(sch, cdev); 1038 sch_set_cdev(sch, cdev);
1040 sch->driver = &io_subchannel_driver;
1041 cdev->ccwlock = sch->lock; 1039 cdev->ccwlock = sch->lock;
1042 1040
1043 /* Init private data. */ 1041 /* Init private data. */
@@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch)
1122 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1120 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1123} 1121}
1124 1122
1125static int 1123static void io_subchannel_init_fields(struct subchannel *sch)
1126io_subchannel_probe (struct subchannel *sch) 1124{
1125 if (cio_is_console(sch->schid))
1126 sch->opm = 0xff;
1127 else
1128 sch->opm = chp_get_sch_opm(sch);
1129 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1130 sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1131
1132 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1133 " - PIM = %02X, PAM = %02X, POM = %02X\n",
1134 sch->schib.pmcw.dev, sch->schid.ssid,
1135 sch->schid.sch_no, sch->schib.pmcw.pim,
1136 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1137 /* Initially set up some fields in the pmcw. */
1138 sch->schib.pmcw.ena = 0;
1139 sch->schib.pmcw.csense = 1; /* concurrent sense */
1140 if ((sch->lpm & (sch->lpm - 1)) != 0)
1141 sch->schib.pmcw.mp = 1; /* multipath mode */
1142 /* clean up possible residual cmf stuff */
1143 sch->schib.pmcw.mme = 0;
1144 sch->schib.pmcw.mbfc = 0;
1145 sch->schib.pmcw.mbi = 0;
1146 sch->schib.mba = 0;
1147}
1148
1149static int io_subchannel_probe(struct subchannel *sch)
1127{ 1150{
1128 struct ccw_device *cdev; 1151 struct ccw_device *cdev;
1129 int rc; 1152 int rc;
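Note: the pmcw setup that used to live in cio_validate_subchannel now happens here, in the I/O subchannel driver's probe path. The multipath test uses a standard bit trick: clearing the lowest set bit and checking whether anything remains tells whether more than one path bit is set. A small sketch with made-up lpm values:

	/* Sketch: (x & (x - 1)) clears the lowest set bit of x. */
	static int example_has_multiple_paths(u8 lpm)
	{
		return (lpm & (lpm - 1)) != 0;
	}
	/* example_has_multiple_paths(0x80) == 0   (single path)  */
	/* example_has_multiple_paths(0xc0) == 1   (two paths)    */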
@@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch)
1132 1155
1133 cdev = sch_get_cdev(sch); 1156 cdev = sch_get_cdev(sch);
1134 if (cdev) { 1157 if (cdev) {
1158 rc = sysfs_create_group(&sch->dev.kobj,
1159 &io_subchannel_attr_group);
1160 if (rc)
1161 CIO_MSG_EVENT(0, "Failed to create io subchannel "
1162 "attributes for subchannel "
1163 "0.%x.%04x (rc=%d)\n",
1164 sch->schid.ssid, sch->schid.sch_no, rc);
1135 /* 1165 /*
1136 * This subchannel already has an associated ccw_device. 1166 * This subchannel already has an associated ccw_device.
1137 * Register it and exit. This happens for all early 1167 * Throw the delayed uevent for the subchannel, register
1138 * device, e.g. the console. 1168 * the ccw_device and exit. This happens for all early
1169 * devices, e.g. the console.
1139 */ 1170 */
1171 sch->dev.uevent_suppress = 0;
1172 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1140 cdev->dev.groups = ccwdev_attr_groups; 1173 cdev->dev.groups = ccwdev_attr_groups;
1141 device_initialize(&cdev->dev); 1174 device_initialize(&cdev->dev);
1142 ccw_device_register(cdev); 1175 ccw_device_register(cdev);
@@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch)
1152 get_device(&cdev->dev); 1185 get_device(&cdev->dev);
1153 return 0; 1186 return 0;
1154 } 1187 }
1188 io_subchannel_init_fields(sch);
1155 /* 1189 /*
1156 * First check if a fitting device may be found amongst the 1190 * First check if a fitting device may be found amongst the
1157 * disconnected devices or in the orphanage. 1191 * disconnected devices or in the orphanage.
1158 */ 1192 */
1159 dev_id.devno = sch->schib.pmcw.dev; 1193 dev_id.devno = sch->schib.pmcw.dev;
1160 dev_id.ssid = sch->schid.ssid; 1194 dev_id.ssid = sch->schid.ssid;
1195 rc = sysfs_create_group(&sch->dev.kobj,
1196 &io_subchannel_attr_group);
1197 if (rc)
1198 return rc;
1161 /* Allocate I/O subchannel private data. */ 1199 /* Allocate I/O subchannel private data. */
1162 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1200 sch->private = kzalloc(sizeof(struct io_subchannel_private),
1163 GFP_KERNEL | GFP_DMA); 1201 GFP_KERNEL | GFP_DMA);
1164 if (!sch->private) 1202 if (!sch->private) {
1165 return -ENOMEM; 1203 rc = -ENOMEM;
1204 goto out_err;
1205 }
1166 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); 1206 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1167 if (!cdev) 1207 if (!cdev)
1168 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), 1208 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
@@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch)
1181 } 1221 }
1182 cdev = io_subchannel_create_ccwdev(sch); 1222 cdev = io_subchannel_create_ccwdev(sch);
1183 if (IS_ERR(cdev)) { 1223 if (IS_ERR(cdev)) {
1184 kfree(sch->private); 1224 rc = PTR_ERR(cdev);
1185 return PTR_ERR(cdev); 1225 goto out_err;
1186 } 1226 }
1187 rc = io_subchannel_recog(cdev, sch); 1227 rc = io_subchannel_recog(cdev, sch);
1188 if (rc) { 1228 if (rc) {
@@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch)
1191 spin_unlock_irqrestore(sch->lock, flags); 1231 spin_unlock_irqrestore(sch->lock, flags);
1192 if (cdev->dev.release) 1232 if (cdev->dev.release)
1193 cdev->dev.release(&cdev->dev); 1233 cdev->dev.release(&cdev->dev);
1194 kfree(sch->private); 1234 goto out_err;
1195 } 1235 }
1196 1236 return 0;
1237out_err:
1238 kfree(sch->private);
1239 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1197 return rc; 1240 return rc;
1198} 1241}
1199 1242
@@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch)
1214 ccw_device_unregister(cdev); 1257 ccw_device_unregister(cdev);
1215 put_device(&cdev->dev); 1258 put_device(&cdev->dev);
1216 kfree(sch->private); 1259 kfree(sch->private);
1260 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1217 return 0; 1261 return 0;
1218} 1262}
1219 1263
@@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event)
1224 cdev = sch_get_cdev(sch); 1268 cdev = sch_get_cdev(sch);
1225 if (!cdev) 1269 if (!cdev)
1226 return 0; 1270 return 0;
1227 if (!cdev->drv) 1271 return ccw_device_notify(cdev, event);
1228 return 0;
1229 if (!cdev->online)
1230 return 0;
1231 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
1232} 1272}
1233 1273
1234static void io_subchannel_verify(struct subchannel *sch) 1274static void io_subchannel_verify(struct subchannel *sch)
@@ -1240,22 +1280,96 @@ static void io_subchannel_verify(struct subchannel *sch)
1240 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1280 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1241} 1281}
1242 1282
1243static void io_subchannel_ioterm(struct subchannel *sch) 1283static int check_for_io_on_path(struct subchannel *sch, int mask)
1244{ 1284{
1245 struct ccw_device *cdev; 1285 int cc;
1246 1286
1247 cdev = sch_get_cdev(sch); 1287 cc = stsch(sch->schid, &sch->schib);
1248 if (!cdev) 1288 if (cc)
1249 return; 1289 return 0;
1250 /* Internal I/O will be retried by the interrupt handler. */ 1290 if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
1251 if (cdev->private->flags.intretry) 1291 return 1;
1292 return 0;
1293}
1294
1295static void terminate_internal_io(struct subchannel *sch,
1296 struct ccw_device *cdev)
1297{
1298 if (cio_clear(sch)) {
1299 /* Recheck device in case clear failed. */
1300 sch->lpm = 0;
1301 if (cdev->online)
1302 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1303 else
1304 css_schedule_eval(sch->schid);
1252 return; 1305 return;
1306 }
1253 cdev->private->state = DEV_STATE_CLEAR_VERIFY; 1307 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
1308 /* Request retry of internal operation. */
1309 cdev->private->flags.intretry = 1;
1310 /* Call handler. */
1254 if (cdev->handler) 1311 if (cdev->handler)
1255 cdev->handler(cdev, cdev->private->intparm, 1312 cdev->handler(cdev, cdev->private->intparm,
1256 ERR_PTR(-EIO)); 1313 ERR_PTR(-EIO));
1257} 1314}
1258 1315
1316static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1317{
1318 struct ccw_device *cdev;
1319
1320 cdev = sch_get_cdev(sch);
1321 if (!cdev)
1322 return;
1323 if (check_for_io_on_path(sch, mask)) {
1324 if (cdev->private->state == DEV_STATE_ONLINE)
1325 ccw_device_kill_io(cdev);
1326 else {
1327 terminate_internal_io(sch, cdev);
1328 /* Re-start path verification. */
1329 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1330 }
1331 } else
1332 /* trigger path verification. */
1333 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1334
1335}
1336
1337static int io_subchannel_chp_event(struct subchannel *sch,
1338 struct chp_link *link, int event)
1339{
1340 int mask;
1341
1342 mask = chp_ssd_get_mask(&sch->ssd_info, link);
1343 if (!mask)
1344 return 0;
1345 switch (event) {
1346 case CHP_VARY_OFF:
1347 sch->opm &= ~mask;
1348 sch->lpm &= ~mask;
1349 io_subchannel_terminate_path(sch, mask);
1350 break;
1351 case CHP_VARY_ON:
1352 sch->opm |= mask;
1353 sch->lpm |= mask;
1354 io_subchannel_verify(sch);
1355 break;
1356 case CHP_OFFLINE:
1357 if (stsch(sch->schid, &sch->schib))
1358 return -ENXIO;
1359 if (!css_sch_is_valid(&sch->schib))
1360 return -ENODEV;
1361 io_subchannel_terminate_path(sch, mask);
1362 break;
1363 case CHP_ONLINE:
1364 if (stsch(sch->schid, &sch->schib))
1365 return -ENXIO;
1366 sch->lpm |= mask & sch->opm;
1367 io_subchannel_verify(sch);
1368 break;
1369 }
1370 return 0;
1371}
1372
1259static void 1373static void
1260io_subchannel_shutdown(struct subchannel *sch) 1374io_subchannel_shutdown(struct subchannel *sch)
1261{ 1375{
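Note: opm and lpm are 8-bit path masks, one bit per channel path, like the PIM/PAM/POM fields logged in io_subchannel_init_fields above. Varying a path off clears its bit in both masks; varying it on restores it, while CHP_ONLINE additionally gates the new lpm bit by opm. A tiny sketch with hypothetical mask values:

	/* Sketch with made-up masks: path bit 0x40 is varied off and on again. */
	static void example_vary(void)
	{
		u8 opm = 0xc0, lpm = 0xc0, mask = 0x40;

		/* CHP_VARY_OFF */
		opm &= ~mask;		/* opm == 0x80 */
		lpm &= ~mask;		/* lpm == 0x80 */

		/* CHP_VARY_ON */
		opm |= mask;		/* opm == 0xc0 */
		lpm |= mask;		/* lpm == 0xc0 */
	}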
@@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch)
1285 cio_disable_subchannel(sch); 1399 cio_disable_subchannel(sch);
1286} 1400}
1287 1401
1402static int io_subchannel_get_status(struct subchannel *sch)
1403{
1404 struct schib schib;
1405
1406 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
1407 return CIO_GONE;
1408 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
1409 return CIO_REVALIDATE;
1410 if (!sch->lpm)
1411 return CIO_NO_PATH;
1412 return CIO_OPER;
1413}
1414
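
io_subchannel_get_status orders its checks from most to least severe: device gone, device number changed, no usable path left, operational. A minimal user-space sketch of that classification order follows; the names (probe, classify) are invented for illustration and are not part of the cio API.

#include <stdio.h>

enum dev_status { ST_GONE, ST_REVALIDATE, ST_NO_PATH, ST_OPER };

struct probe {
        int valid;      /* did the store-subchannel succeed? */
        int devno;      /* device number reported by the hardware */
};

/* Compare a fresh probe of the hardware against the cached state. */
static enum dev_status classify(struct probe fresh, int cached_devno,
                                unsigned char lpm)
{
        if (!fresh.valid)
                return ST_GONE;         /* nothing there any more */
        if (fresh.devno != cached_devno)
                return ST_REVALIDATE;   /* something else appeared */
        if (!lpm)
                return ST_NO_PATH;      /* device present but unreachable */
        return ST_OPER;
}

int main(void)
{
        struct probe p = { .valid = 1, .devno = 0x1234 };

        printf("%d\n", classify(p, 0x1234, 0x80)); /* ST_OPER */
        printf("%d\n", classify(p, 0x4711, 0x80)); /* ST_REVALIDATE */
        return 0;
}
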
1415static int device_is_disconnected(struct ccw_device *cdev)
1416{
1417 if (!cdev)
1418 return 0;
1419 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1420 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1421}
1422
1423static int recovery_check(struct device *dev, void *data)
1424{
1425 struct ccw_device *cdev = to_ccwdev(dev);
1426 int *redo = data;
1427
1428 spin_lock_irq(cdev->ccwlock);
1429 switch (cdev->private->state) {
1430 case DEV_STATE_DISCONNECTED:
1431 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1432 cdev->private->dev_id.ssid,
1433 cdev->private->dev_id.devno);
1434 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1435 *redo = 1;
1436 break;
1437 case DEV_STATE_DISCONNECTED_SENSE_ID:
1438 *redo = 1;
1439 break;
1440 }
1441 spin_unlock_irq(cdev->ccwlock);
1442
1443 return 0;
1444}
1445
1446static void recovery_work_func(struct work_struct *unused)
1447{
1448 int redo = 0;
1449
1450 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1451 if (redo) {
1452 spin_lock_irq(&recovery_lock);
1453 if (!timer_pending(&recovery_timer)) {
1454 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1455 recovery_phase++;
1456 mod_timer(&recovery_timer, jiffies +
1457 recovery_delay[recovery_phase] * HZ);
1458 }
1459 spin_unlock_irq(&recovery_lock);
1460 } else
1461 CIO_MSG_EVENT(4, "recovery: end\n");
1462}
1463
1464static DECLARE_WORK(recovery_work, recovery_work_func);
1465
1466static void recovery_func(unsigned long data)
1467{
1468 /*
1469 * We can't do our recovery in softirq context and it's not
1470 * performance critical, so we schedule it.
1471 */
1472 schedule_work(&recovery_work);
1473}
1474
1475static void ccw_device_schedule_recovery(void)
1476{
1477 unsigned long flags;
1478
1479 CIO_MSG_EVENT(4, "recovery: schedule\n");
1480 spin_lock_irqsave(&recovery_lock, flags);
1481 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1482 recovery_phase = 0;
1483 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1484 }
1485 spin_unlock_irqrestore(&recovery_lock, flags);
1486}
1487
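
The recovery path keeps retrying for as long as disconnected devices remain, stepping through recovery_delay[] and then staying at the last (largest) interval. The sketch below isolates that capped escalation; the delay values are made up for illustration and the real table is defined elsewhere in device.c.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative delay table (seconds); the kernel's values may differ. */
static const int recovery_delay[] = { 3, 30, 300 };
static int recovery_phase;

/* Return the delay to wait before the next recovery attempt. */
static int next_retry_delay(void)
{
        if (recovery_phase < (int)ARRAY_SIZE(recovery_delay) - 1)
                recovery_phase++;       /* escalate until the last entry */
        return recovery_delay[recovery_phase];
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("retry %d after %d s\n", i, next_retry_delay());
        return 0;
}
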
1488static void device_set_disconnected(struct ccw_device *cdev)
1489{
1490 if (!cdev)
1491 return;
1492 ccw_device_set_timeout(cdev, 0);
1493 cdev->private->flags.fake_irb = 0;
1494 cdev->private->state = DEV_STATE_DISCONNECTED;
1495 if (cdev->online)
1496 ccw_device_schedule_recovery();
1497}
1498
1499static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1500{
1501 int event, ret, disc;
1502 unsigned long flags;
1503 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
1504 struct ccw_device *cdev;
1505
1506 spin_lock_irqsave(sch->lock, flags);
1507 cdev = sch_get_cdev(sch);
1508 disc = device_is_disconnected(cdev);
1509 if (disc && slow) {
1510		/* Disconnected devices are only evaluated directly. */
1511 spin_unlock_irqrestore(sch->lock, flags);
1512 return 0;
1513 }
1514 /* No interrupt after machine check - kill pending timers. */
1515 if (cdev)
1516 ccw_device_set_timeout(cdev, 0);
1517 if (!disc && !slow) {
1518 /* Non-disconnected devices are evaluated on the slow path. */
1519 spin_unlock_irqrestore(sch->lock, flags);
1520 return -EAGAIN;
1521 }
1522 event = io_subchannel_get_status(sch);
1523 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
1524 sch->schid.ssid, sch->schid.sch_no, event,
1525 disc ? "disconnected" : "normal",
1526 slow ? "slow" : "fast");
1527 /* Analyze subchannel status. */
1528 action = NONE;
1529 switch (event) {
1530 case CIO_NO_PATH:
1531 if (disc) {
1532 /* Check if paths have become available. */
1533 action = REPROBE;
1534 break;
1535 }
1536 /* fall through */
1537 case CIO_GONE:
1538 /* Prevent unwanted effects when opening lock. */
1539 cio_disable_subchannel(sch);
1540 device_set_disconnected(cdev);
1541 /* Ask driver what to do with device. */
1542 action = UNREGISTER;
1543 spin_unlock_irqrestore(sch->lock, flags);
1544 ret = io_subchannel_notify(sch, event);
1545 spin_lock_irqsave(sch->lock, flags);
1546 if (ret)
1547 action = NONE;
1548 break;
1549 case CIO_REVALIDATE:
1550 /* Device will be removed, so no notify necessary. */
1551 if (disc)
1552 /* Reprobe because immediate unregister might block. */
1553 action = REPROBE;
1554 else
1555 action = UNREGISTER_PROBE;
1556 break;
1557 case CIO_OPER:
1558 if (disc)
1559 /* Get device operational again. */
1560 action = REPROBE;
1561 break;
1562 }
1563 /* Perform action. */
1564 ret = 0;
1565 switch (action) {
1566 case UNREGISTER:
1567 case UNREGISTER_PROBE:
1568 /* Unregister device (will use subchannel lock). */
1569 spin_unlock_irqrestore(sch->lock, flags);
1570 css_sch_device_unregister(sch);
1571 spin_lock_irqsave(sch->lock, flags);
1572
1573 /* Reset intparm to zeroes. */
1574 sch->schib.pmcw.intparm = 0;
1575 cio_modify(sch);
1576 break;
1577 case REPROBE:
1578 ccw_device_trigger_reprobe(cdev);
1579 break;
1580 default:
1581 break;
1582 }
1583 spin_unlock_irqrestore(sch->lock, flags);
1584 /* Probe if necessary. */
1585 if (action == UNREGISTER_PROBE)
1586 ret = css_probe_device(sch->schid);
1587
1588 return ret;
1589}
1590
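
One pattern worth noting in io_subchannel_sch_event: the decision is made while holding sch->lock, but anything that may block or take other locks (the driver notify, css_sch_device_unregister, css_probe_device) is called only after the lock has been dropped, and the lock is re-acquired afterwards to finish the bookkeeping. A stand-alone pthread sketch of the same discipline, with invented names and a single shared state variable:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Stand-in for a call like css_sch_device_unregister(): may block,
 * so it must not run with the lock held. */
static void slow_unregister(void)
{
        printf("unregistered\n");
}

static void handle_event(void)
{
        int need_unregister;

        pthread_mutex_lock(&lock);
        need_unregister = (state == 0); /* decide under the lock */
        pthread_mutex_unlock(&lock);    /* drop it for the slow part */

        if (need_unregister)
                slow_unregister();

        pthread_mutex_lock(&lock);      /* re-acquire to update state */
        state = 1;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        handle_event();
        return 0;
}
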
1288#ifdef CONFIG_CCW_CONSOLE 1591#ifdef CONFIG_CCW_CONSOLE
1289static struct ccw_device console_cdev; 1592static struct ccw_device console_cdev;
1290static struct ccw_device_private console_private; 1593static struct ccw_device_private console_private;
@@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void)
1297 return &ccw_console_lock; 1600 return &ccw_console_lock;
1298} 1601}
1299 1602
1300static int 1603static int ccw_device_console_enable(struct ccw_device *cdev,
1301ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) 1604 struct subchannel *sch)
1302{ 1605{
1303 int rc; 1606 int rc;
1304 1607
1305 /* Attach subchannel private data. */ 1608 /* Attach subchannel private data. */
1306 sch->private = cio_get_console_priv(); 1609 sch->private = cio_get_console_priv();
1307 memset(sch->private, 0, sizeof(struct io_subchannel_private)); 1610 memset(sch->private, 0, sizeof(struct io_subchannel_private));
1611 io_subchannel_init_fields(sch);
1612 sch->driver = &io_subchannel_driver;
1308 /* Initialize the ccw_device structure. */ 1613 /* Initialize the ccw_device structure. */
1309 cdev->dev.parent= &sch->dev; 1614 cdev->dev.parent= &sch->dev;
1310 rc = io_subchannel_recog(cdev, sch); 1615 rc = io_subchannel_recog(cdev, sch);
@@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
1515 return sch->schid; 1820 return sch->schid;
1516} 1821}
1517 1822
1518static int recovery_check(struct device *dev, void *data)
1519{
1520 struct ccw_device *cdev = to_ccwdev(dev);
1521 int *redo = data;
1522
1523 spin_lock_irq(cdev->ccwlock);
1524 switch (cdev->private->state) {
1525 case DEV_STATE_DISCONNECTED:
1526 CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
1527 cdev->private->dev_id.ssid,
1528 cdev->private->dev_id.devno);
1529 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1530 *redo = 1;
1531 break;
1532 case DEV_STATE_DISCONNECTED_SENSE_ID:
1533 *redo = 1;
1534 break;
1535 }
1536 spin_unlock_irq(cdev->ccwlock);
1537
1538 return 0;
1539}
1540
1541static void recovery_work_func(struct work_struct *unused)
1542{
1543 int redo = 0;
1544
1545 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1546 if (redo) {
1547 spin_lock_irq(&recovery_lock);
1548 if (!timer_pending(&recovery_timer)) {
1549 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1550 recovery_phase++;
1551 mod_timer(&recovery_timer, jiffies +
1552 recovery_delay[recovery_phase] * HZ);
1553 }
1554 spin_unlock_irq(&recovery_lock);
1555 } else
1556 CIO_MSG_EVENT(4, "recovery: end\n");
1557}
1558
1559static DECLARE_WORK(recovery_work, recovery_work_func);
1560
1561static void recovery_func(unsigned long data)
1562{
1563 /*
1564 * We can't do our recovery in softirq context and it's not
1565 * performance critical, so we schedule it.
1566 */
1567 schedule_work(&recovery_work);
1568}
1569
1570void ccw_device_schedule_recovery(void)
1571{
1572 unsigned long flags;
1573
1574 CIO_MSG_EVENT(4, "recovery: schedule\n");
1575 spin_lock_irqsave(&recovery_lock, flags);
1576 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1577 recovery_phase = 0;
1578 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1579 }
1580 spin_unlock_irqrestore(&recovery_lock, flags);
1581}
1582
1583MODULE_LICENSE("GPL"); 1823MODULE_LICENSE("GPL");
1584EXPORT_SYMBOL(ccw_device_set_online); 1824EXPORT_SYMBOL(ccw_device_set_online);
1585EXPORT_SYMBOL(ccw_device_set_offline); 1825EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index cb08092be39f..9800a8335a3f 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *);
88int ccw_device_online(struct ccw_device *); 88int ccw_device_online(struct ccw_device *);
89int ccw_device_offline(struct ccw_device *); 89int ccw_device_offline(struct ccw_device *);
90 90
91void ccw_device_schedule_recovery(void);
92
93/* Function prototypes for device status and basic sense stuff. */ 91/* Function prototypes for device status and basic sense stuff. */
94void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 92void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
95void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); 93void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
@@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *);
118 116
119int ccw_device_stlck(struct ccw_device *); 117int ccw_device_stlck(struct ccw_device *);
120 118
119/* Helper function for machine check handling. */
120void ccw_device_trigger_reprobe(struct ccw_device *);
121void ccw_device_kill_io(struct ccw_device *);
122int ccw_device_notify(struct ccw_device *, int);
123
121/* qdio needs this. */ 124/* qdio needs this. */
122void ccw_device_set_timeout(struct ccw_device *, int); 125void ccw_device_set_timeout(struct ccw_device *, int);
123extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 126extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e268d5a77c12..8b5fe57fb2f3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/device_fsm.c 2 * drivers/s390/cio/device_fsm.c
3 * finite state machine for device handling 3 * finite state machine for device handling
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */ 8 */
@@ -27,65 +26,6 @@
27 26
28static int timeout_log_enabled; 27static int timeout_log_enabled;
29 28
30int
31device_is_online(struct subchannel *sch)
32{
33 struct ccw_device *cdev;
34
35 cdev = sch_get_cdev(sch);
36 if (!cdev)
37 return 0;
38 return (cdev->private->state == DEV_STATE_ONLINE);
39}
40
41int
42device_is_disconnected(struct subchannel *sch)
43{
44 struct ccw_device *cdev;
45
46 cdev = sch_get_cdev(sch);
47 if (!cdev)
48 return 0;
49 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
50 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
51}
52
53void
54device_set_disconnected(struct subchannel *sch)
55{
56 struct ccw_device *cdev;
57
58 cdev = sch_get_cdev(sch);
59 if (!cdev)
60 return;
61 ccw_device_set_timeout(cdev, 0);
62 cdev->private->flags.fake_irb = 0;
63 cdev->private->state = DEV_STATE_DISCONNECTED;
64 if (cdev->online)
65 ccw_device_schedule_recovery();
66}
67
68void device_set_intretry(struct subchannel *sch)
69{
70 struct ccw_device *cdev;
71
72 cdev = sch_get_cdev(sch);
73 if (!cdev)
74 return;
75 cdev->private->flags.intretry = 1;
76}
77
78int device_trigger_verify(struct subchannel *sch)
79{
80 struct ccw_device *cdev;
81
82 cdev = sch_get_cdev(sch);
83 if (!cdev || !cdev->online)
84 return -EINVAL;
85 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
86 return 0;
87}
88
89static int __init ccw_timeout_log_setup(char *unused) 29static int __init ccw_timeout_log_setup(char *unused)
90{ 30{
91 timeout_log_enabled = 1; 31 timeout_log_enabled = 1;
@@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev)
99 struct schib schib; 39 struct schib schib;
100 struct subchannel *sch; 40 struct subchannel *sch;
101 struct io_subchannel_private *private; 41 struct io_subchannel_private *private;
42 union orb *orb;
102 int cc; 43 int cc;
103 44
104 sch = to_subchannel(cdev->dev.parent); 45 sch = to_subchannel(cdev->dev.parent);
105 private = to_io_private(sch); 46 private = to_io_private(sch);
47 orb = &private->orb;
106 cc = stsch(sch->schid, &schib); 48 cc = stsch(sch->schid, &schib);
107 49
108 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
109 "device information:\n", get_clock()); 51 "device information:\n", get_clock());
110 printk(KERN_WARNING "cio: orb:\n"); 52 printk(KERN_WARNING "cio: orb:\n");
111 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 53 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
112 &private->orb, sizeof(private->orb), 0); 54 orb, sizeof(*orb), 0);
113 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); 55 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
114 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); 56 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
115 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " 57 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
116 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); 58 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
117 59
118 if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || 60 if (orb->tm.b) {
119 (void *)(addr_t)private->orb.cpa == cdev->private->iccws) 61 printk(KERN_WARNING "cio: orb indicates transport mode\n");
120 printk(KERN_WARNING "cio: last channel program (intern):\n"); 62 printk(KERN_WARNING "cio: last tcw:\n");
121 else 63 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
122 printk(KERN_WARNING "cio: last channel program:\n"); 64 (void *)(addr_t)orb->tm.tcw,
123 65 sizeof(struct tcw), 0);
124 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 66 } else {
125 (void *)(addr_t)private->orb.cpa, 67 printk(KERN_WARNING "cio: orb indicates command mode\n");
126 sizeof(struct ccw1), 0); 68 if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
69 (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
70 printk(KERN_WARNING "cio: last channel program "
71 "(intern):\n");
72 else
73 printk(KERN_WARNING "cio: last channel program:\n");
74
75 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
76 (void *)(addr_t)orb->cmd.cpa,
77 sizeof(struct ccw1), 0);
78 }
127 printk(KERN_WARNING "cio: ccw device state: %d\n", 79 printk(KERN_WARNING "cio: ccw device state: %d\n",
128 cdev->private->state); 80 cdev->private->state);
129 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); 81 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
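
ccw_timeout_log dumps the ORB and the last channel program with print_hex_dump(). For readers who have not seen such output, here is a small user-space stand-in that prints sixteen bytes per line with an offset column; it only approximates the shape of the kernel helper's output and is not the kernel function.

#include <stdio.h>
#include <stddef.h>

/* Minimal hex dump: 16 bytes per line, offset at the left. */
static void hex_dump(const char *prefix, const void *buf, size_t len)
{
        const unsigned char *p = buf;

        for (size_t i = 0; i < len; i++) {
                if (i % 16 == 0)
                        printf("%s%04zx:", prefix, i);
                printf(" %02x", p[i]);
                if (i % 16 == 15 || i == len - 1)
                        printf("\n");
        }
}

int main(void)
{
        unsigned char orb[24] = { 0x00, 0x80, 0xff, 0x10 };

        hex_dump("cio: ", orb, sizeof(orb));
        return 0;
}
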
@@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
171 add_timer(&cdev->private->timer); 123 add_timer(&cdev->private->timer);
172} 124}
173 125
174/* Kill any pending timers after machine check. */
175void
176device_kill_pending_timer(struct subchannel *sch)
177{
178 struct ccw_device *cdev;
179
180 cdev = sch_get_cdev(sch);
181 if (!cdev)
182 return;
183 ccw_device_set_timeout(cdev, 0);
184}
185
186/* 126/*
187 * Cancel running i/o. This is called repeatedly since halt/clear are 127 * Cancel running i/o. This is called repeatedly since halt/clear are
188 * asynchronous operations. We do one try with cio_cancel, two tries 128 * asynchronous operations. We do one try with cio_cancel, two tries
@@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
205 /* Not operational -> done. */ 145 /* Not operational -> done. */
206 return 0; 146 return 0;
207 /* Stage 1: cancel io. */ 147 /* Stage 1: cancel io. */
208 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && 148 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
209 !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { 149 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
210 ret = cio_cancel(sch); 150 if (!scsw_is_tm(&sch->schib.scsw)) {
211 if (ret != -EINVAL) 151 ret = cio_cancel(sch);
212 return ret; 152 if (ret != -EINVAL)
213 /* cancel io unsuccessful. From now on it is asynchronous. */ 153 return ret;
154 }
155 /* cancel io unsuccessful or not applicable (transport mode).
156 * Continue with asynchronous instructions. */
214 cdev->private->iretry = 3; /* 3 halt retries. */ 157 cdev->private->iretry = 3; /* 3 halt retries. */
215 } 158 }
216 if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { 159 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
217 /* Stage 2: halt io. */ 160 /* Stage 2: halt io. */
218 if (cdev->private->iretry) { 161 if (cdev->private->iretry) {
219 cdev->private->iretry--; 162 cdev->private->iretry--;
@@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
388 } 331 }
389} 332}
390 333
334int ccw_device_notify(struct ccw_device *cdev, int event)
335{
336 if (!cdev->drv)
337 return 0;
338 if (!cdev->online)
339 return 0;
340 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
341}
342
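
ccw_device_notify centralizes the guards that callers previously repeated by hand: no driver bound, device offline, or no notify callback all mean "no opinion" and yield 0; otherwise the driver's verdict is returned. A generic sketch of this optional-callback pattern, with invented structure and function names:

#include <stdio.h>

struct fake_driver {
        /* Optional: drivers that do not care leave this NULL. */
        int (*notify)(void *dev, int event);
};

struct fake_device {
        struct fake_driver *drv;
        int online;
};

/* Return the driver's verdict, or 0 ("no opinion") if there is no
 * driver, the device is offline, or no callback was provided. */
static int notify(struct fake_device *dev, int event)
{
        if (!dev->drv || !dev->online)
                return 0;
        return dev->drv->notify ? dev->drv->notify(dev, event) : 0;
}

static int keep_it(void *dev, int event)
{
        (void)dev;
        (void)event;
        return 1;       /* driver wants to keep the device */
}

int main(void)
{
        struct fake_driver drv = { .notify = keep_it };
        struct fake_device dev = { .drv = &drv, .online = 1 };

        printf("verdict: %d\n", notify(&dev, 1));
        return 0;
}
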
391static void 343static void
392ccw_device_oper_notify(struct work_struct *work) 344ccw_device_oper_notify(struct work_struct *work)
393{ 345{
394 struct ccw_device_private *priv; 346 struct ccw_device_private *priv;
395 struct ccw_device *cdev; 347 struct ccw_device *cdev;
396 struct subchannel *sch;
397 int ret; 348 int ret;
398 unsigned long flags;
399 349
400 priv = container_of(work, struct ccw_device_private, kick_work); 350 priv = container_of(work, struct ccw_device_private, kick_work);
401 cdev = priv->cdev; 351 cdev = priv->cdev;
402 spin_lock_irqsave(cdev->ccwlock, flags); 352 ret = ccw_device_notify(cdev, CIO_OPER);
403 sch = to_subchannel(cdev->dev.parent);
404 if (sch->driver && sch->driver->notify) {
405 spin_unlock_irqrestore(cdev->ccwlock, flags);
406 ret = sch->driver->notify(sch, CIO_OPER);
407 spin_lock_irqsave(cdev->ccwlock, flags);
408 } else
409 ret = 0;
410 if (ret) { 353 if (ret) {
411 /* Reenable channel measurements, if needed. */ 354 /* Reenable channel measurements, if needed. */
412 spin_unlock_irqrestore(cdev->ccwlock, flags);
413 cmf_reenable(cdev); 355 cmf_reenable(cdev);
414 spin_lock_irqsave(cdev->ccwlock, flags);
415 wake_up(&cdev->private->wait_q); 356 wake_up(&cdev->private->wait_q);
416 } 357 } else
417 spin_unlock_irqrestore(cdev->ccwlock, flags);
418 if (!ret)
419 /* Driver doesn't want device back. */ 358 /* Driver doesn't want device back. */
420 ccw_device_do_unreg_rereg(work); 359 ccw_device_do_unreg_rereg(work);
421} 360}
@@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
621 /* Deliver fake irb to device driver, if needed. */ 560 /* Deliver fake irb to device driver, if needed. */
622 if (cdev->private->flags.fake_irb) { 561 if (cdev->private->flags.fake_irb) {
623 memset(&cdev->private->irb, 0, sizeof(struct irb)); 562 memset(&cdev->private->irb, 0, sizeof(struct irb));
624 cdev->private->irb.scsw.cc = 1; 563 cdev->private->irb.scsw.cmd.cc = 1;
625 cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; 564 cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
626 cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; 565 cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
627 cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; 566 cdev->private->irb.scsw.cmd.stctl =
567 SCSW_STCTL_STATUS_PEND;
628 cdev->private->flags.fake_irb = 0; 568 cdev->private->flags.fake_irb = 0;
629 if (cdev->handler) 569 if (cdev->handler)
630 cdev->handler(cdev, cdev->private->intparm, 570 cdev->handler(cdev, cdev->private->intparm,
@@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev)
718 sch = to_subchannel(cdev->dev.parent); 658 sch = to_subchannel(cdev->dev.parent);
719 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) 659 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
720 return -ENODEV; 660 return -ENODEV;
721 if (cdev->private->state != DEV_STATE_ONLINE) { 661 if (scsw_actl(&sch->schib.scsw) != 0)
722 if (sch->schib.scsw.actl != 0)
723 return -EBUSY;
724 return -EINVAL;
725 }
726 if (sch->schib.scsw.actl != 0)
727 return -EBUSY; 662 return -EBUSY;
663 if (cdev->private->state != DEV_STATE_ONLINE)
664 return -EINVAL;
728 /* Are we doing path grouping? */ 665 /* Are we doing path grouping? */
729 if (!cdev->private->options.pgroup) { 666 if (!cdev->private->options.pgroup) {
730 /* No, set state offline immediately. */ 667 /* No, set state offline immediately. */
@@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
799 */ 736 */
800 stsch(sch->schid, &sch->schib); 737 stsch(sch->schid, &sch->schib);
801 738
802 if (sch->schib.scsw.actl != 0 || 739 if (scsw_actl(&sch->schib.scsw) != 0 ||
803 (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || 740 (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
804 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { 741 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
805 /* 742 /*
806 * No final status yet or final status not yet delivered 743 * No final status yet or final status not yet delivered
807 * to the device driver. Can't do path verification now, 744
@@ -823,13 +760,13 @@ static void
823ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) 760ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
824{ 761{
825 struct irb *irb; 762 struct irb *irb;
763 int is_cmd;
826 764
827 irb = (struct irb *) __LC_IRB; 765 irb = (struct irb *) __LC_IRB;
766 is_cmd = !scsw_is_tm(&irb->scsw);
828 /* Check for unsolicited interrupt. */ 767 /* Check for unsolicited interrupt. */
829 if ((irb->scsw.stctl == 768 if (!scsw_is_solicited(&irb->scsw)) {
830 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) 769 if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
831 && (!irb->scsw.cc)) {
832 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
833 !irb->esw.esw0.erw.cons) { 770 !irb->esw.esw0.erw.cons) {
834 /* Unit check but no sense data. Need basic sense. */ 771 /* Unit check but no sense data. Need basic sense. */
835 if (ccw_device_do_sense(cdev, irb) != 0) 772 if (ccw_device_do_sense(cdev, irb) != 0)
@@ -848,7 +785,7 @@ call_handler_unsol:
848 } 785 }
849 /* Accumulate status and find out if a basic sense is needed. */ 786 /* Accumulate status and find out if a basic sense is needed. */
850 ccw_device_accumulate_irb(cdev, irb); 787 ccw_device_accumulate_irb(cdev, irb);
851 if (cdev->private->flags.dosense) { 788 if (is_cmd && cdev->private->flags.dosense) {
852 if (ccw_device_do_sense(cdev, irb) == 0) { 789 if (ccw_device_do_sense(cdev, irb) == 0) {
853 cdev->private->state = DEV_STATE_W4SENSE; 790 cdev->private->state = DEV_STATE_W4SENSE;
854 } 791 }
@@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
892 829
893 irb = (struct irb *) __LC_IRB; 830 irb = (struct irb *) __LC_IRB;
894 /* Check for unsolicited interrupt. */ 831 /* Check for unsolicited interrupt. */
895 if (irb->scsw.stctl == 832 if (scsw_stctl(&irb->scsw) ==
896 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 833 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
897 if (irb->scsw.cc == 1) 834 if (scsw_cc(&irb->scsw) == 1)
898 /* Basic sense hasn't started. Try again. */ 835 /* Basic sense hasn't started. Try again. */
899 ccw_device_do_sense(cdev, irb); 836 ccw_device_do_sense(cdev, irb);
900 else { 837 else {
@@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
912 * only deliver the halt/clear interrupt to the device driver as if it 849 * only deliver the halt/clear interrupt to the device driver as if it
913 * had killed the original request. 850 * had killed the original request.
914 */ 851 */
915 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 852 if (scsw_fctl(&irb->scsw) &
853 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
916 /* Retry Basic Sense if requested. */ 854 /* Retry Basic Sense if requested. */
917 if (cdev->private->flags.intretry) { 855 if (cdev->private->flags.intretry) {
918 cdev->private->flags.intretry = 0; 856 cdev->private->flags.intretry = 0;
@@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
986 ERR_PTR(-EIO)); 924 ERR_PTR(-EIO));
987} 925}
988 926
989void device_kill_io(struct subchannel *sch) 927void ccw_device_kill_io(struct ccw_device *cdev)
990{ 928{
991 int ret; 929 int ret;
992 struct ccw_device *cdev;
993 930
994 cdev = sch_get_cdev(sch);
995 ret = ccw_device_cancel_halt_clear(cdev); 931 ret = ccw_device_cancel_halt_clear(cdev);
996 if (ret == -EBUSY) { 932 if (ret == -EBUSY) {
997 ccw_device_set_timeout(cdev, 3*HZ); 933 ccw_device_set_timeout(cdev, 3*HZ);
@@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1021 case DEV_EVENT_INTERRUPT: 957 case DEV_EVENT_INTERRUPT:
1022 irb = (struct irb *) __LC_IRB; 958 irb = (struct irb *) __LC_IRB;
1023 /* Check for unsolicited interrupt. */ 959 /* Check for unsolicited interrupt. */
1024 if ((irb->scsw.stctl == 960 if ((scsw_stctl(&irb->scsw) ==
1025 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && 961 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1026 (!irb->scsw.cc)) 962 (!scsw_cc(&irb->scsw)))
1027 /* FIXME: we should restart stlck here, but this 963 /* FIXME: we should restart stlck here, but this
1028 * is extremely unlikely ... */ 964 * is extremely unlikely ... */
1029 goto out_wakeup; 965 goto out_wakeup;
@@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1055 ccw_device_sense_id_start(cdev); 991 ccw_device_sense_id_start(cdev);
1056} 992}
1057 993
1058void 994void ccw_device_trigger_reprobe(struct ccw_device *cdev)
1059device_trigger_reprobe(struct subchannel *sch)
1060{ 995{
1061 struct ccw_device *cdev; 996 struct subchannel *sch;
1062 997
1063 cdev = sch_get_cdev(sch);
1064 if (!cdev)
1065 return;
1066 if (cdev->private->state != DEV_STATE_DISCONNECTED) 998 if (cdev->private->state != DEV_STATE_DISCONNECTED)
1067 return; 999 return;
1068 1000
1001 sch = to_subchannel(cdev->dev.parent);
1069 /* Update some values. */ 1002 /* Update some values. */
1070 if (stsch(sch->schid, &sch->schib)) 1003 if (stsch(sch->schid, &sch->schib))
1071 return; 1004 return;
@@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch)
1081 sch->schib.pmcw.ena = 0; 1014 sch->schib.pmcw.ena = 0;
1082 if ((sch->lpm & (sch->lpm - 1)) != 0) 1015 if ((sch->lpm & (sch->lpm - 1)) != 0)
1083 sch->schib.pmcw.mp = 1; 1016 sch->schib.pmcw.mp = 1;
1084 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
1085 /* We should also update ssd info, but this has to wait. */ 1017
1086 /* Check if this is another device which appeared on the same sch. */ 1018 /* Check if this is another device which appeared on the same sch. */
1087 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1019 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index cba7020517ed..1bdaa614e34f 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
196 irb = &cdev->private->irb; 196 irb = &cdev->private->irb;
197 197
198 /* Check the error cases. */ 198 /* Check the error cases. */
199 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 199 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
200 /* Retry Sense ID if requested. */ 200 /* Retry Sense ID if requested. */
201 if (cdev->private->flags.intretry) { 201 if (cdev->private->flags.intretry) {
202 cdev->private->flags.intretry = 0; 202 cdev->private->flags.intretry = 0;
@@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
234 irb->ecw[6], irb->ecw[7]); 234 irb->ecw[6], irb->ecw[7]);
235 return -EAGAIN; 235 return -EAGAIN;
236 } 236 }
237 if (irb->scsw.cc == 3) { 237 if (irb->scsw.cmd.cc == 3) {
238 u8 lpm; 238 u8 lpm;
239 239
240 lpm = to_io_private(sch)->orb.lpm; 240 lpm = to_io_private(sch)->orb.cmd.lpm;
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " 242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is " 243 "on subchannel 0.%x.%04x is "
@@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
248 } 248 }
249 249
250 /* Did we get a proper answer ? */ 250 /* Did we get a proper answer ? */
251 if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && 251 if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
252 cdev->private->senseid.reserved == 0xFF) { 252 cdev->private->senseid.reserved == 0xFF) {
253 if (irb->scsw.count < sizeof(struct senseid) - 8) 253 if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
254 cdev->private->flags.esid = 1; 254 cdev->private->flags.esid = 1;
255 return 0; /* Success */ 255 return 0; /* Success */
256 } 256 }
@@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
260 "subchannel 0.%x.%04x returns status %02X%02X\n", 260 "subchannel 0.%x.%04x returns status %02X%02X\n",
261 cdev->private->dev_id.devno, sch->schid.ssid, 261 cdev->private->dev_id.devno, sch->schid.ssid,
262 sch->schid.sch_no, 262 sch->schid.sch_no,
263 irb->scsw.dstat, irb->scsw.cstat); 263 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
264 return -EAGAIN; 264 return -EAGAIN;
265} 265}
266 266
@@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
277 sch = to_subchannel(cdev->dev.parent); 277 sch = to_subchannel(cdev->dev.parent);
278 irb = (struct irb *) __LC_IRB; 278 irb = (struct irb *) __LC_IRB;
279 /* Retry sense id, if needed. */ 279 /* Retry sense id, if needed. */
280 if (irb->scsw.stctl == 280 if (irb->scsw.cmd.stctl ==
281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
282 if ((irb->scsw.cc == 1) || !irb->scsw.actl) { 282 if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
283 ret = __ccw_device_sense_id_start(cdev); 283 ret = __ccw_device_sense_id_start(cdev);
284 if (ret && ret != -EBUSY) 284 if (ret && ret != -EBUSY)
285 ccw_device_sense_id_done(cdev, ret); 285 ccw_device_sense_id_done(cdev, ret);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f308ad55a6d5..ee1a28310fbb 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -17,6 +17,7 @@
17#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
18#include <asm/idals.h> 18#include <asm/idals.h>
19#include <asm/chpid.h> 19#include <asm/chpid.h>
20#include <asm/fcx.h>
20 21
21#include "cio.h" 22#include "cio.h"
22#include "cio_debug.h" 23#include "cio_debug.h"
@@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
179 return -EBUSY; 180 return -EBUSY;
180 } 181 }
181 if (cdev->private->state != DEV_STATE_ONLINE || 182 if (cdev->private->state != DEV_STATE_ONLINE ||
182 ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && 183 ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
183 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || 184 !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
184 cdev->private->flags.doverify) 185 cdev->private->flags.doverify)
185 return -EBUSY; 186 return -EBUSY;
186 ret = cio_set_options (sch, flags); 187 ret = cio_set_options (sch, flags);
@@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev)
379 if (cdev->private->state == DEV_STATE_NOT_OPER) 380 if (cdev->private->state == DEV_STATE_NOT_OPER)
380 return -ENODEV; 381 return -ENODEV;
381 if (cdev->private->state != DEV_STATE_ONLINE || 382 if (cdev->private->state != DEV_STATE_ONLINE ||
382 !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) 383 !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
383 return -EINVAL; 384 return -EINVAL;
384 return cio_resume(sch); 385 return cio_resume(sch);
385} 386}
@@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev)
404 * - fast notification was requested (primary status) 405 * - fast notification was requested (primary status)
405 * - unsolicited interrupts 406 * - unsolicited interrupts
406 */ 407 */
407 stctl = cdev->private->irb.scsw.stctl; 408 stctl = scsw_stctl(&cdev->private->irb.scsw);
408 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 409 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
409 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 410 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
410 (stctl == SCSW_STCTL_STATUS_PEND); 411 (stctl == SCSW_STCTL_STATUS_PEND);
@@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev)
528 cio_disable_subchannel(sch); //FIXME: return code? 529 cio_disable_subchannel(sch); //FIXME: return code?
529 goto out_unlock; 530 goto out_unlock;
530 } 531 }
531 cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; 532 cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
532 spin_unlock_irqrestore(sch->lock, flags); 533 spin_unlock_irqrestore(sch->lock, flags);
533 wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); 534 wait_event(cdev->private->wait_q,
535 cdev->private->irb.scsw.cmd.actl == 0);
534 spin_lock_irqsave(sch->lock, flags); 536 spin_lock_irqsave(sch->lock, flags);
535 cio_disable_subchannel(sch); //FIXME: return code? 537 cio_disable_subchannel(sch); //FIXME: return code?
536 if ((cdev->private->irb.scsw.dstat != 538 if ((cdev->private->irb.scsw.cmd.dstat !=
537 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || 539 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
538 (cdev->private->irb.scsw.cstat != 0)) 540 (cdev->private->irb.scsw.cmd.cstat != 0))
539 ret = -EIO; 541 ret = -EIO;
540 /* Clear irb. */ 542 /* Clear irb. */
541 memset(&cdev->private->irb, 0, sizeof(struct irb)); 543 memset(&cdev->private->irb, 0, sizeof(struct irb));
@@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
568} 570}
569EXPORT_SYMBOL(ccw_device_get_id); 571EXPORT_SYMBOL(ccw_device_get_id);
570 572
573/**
574 * ccw_device_tm_start_key - perform start function
575 * @cdev: ccw device on which to perform the start function
576 * @tcw: transport-command word to be started
577 * @intparm: user defined parameter to be passed to the interrupt handler
578 * @lpm: mask of paths to use
579 * @key: storage key to use for storage access
580 *
581 * Start the tcw on the given ccw device. Return zero on success, non-zero
582 * otherwise.
583 */
584int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
585 unsigned long intparm, u8 lpm, u8 key)
586{
587 struct subchannel *sch;
588 int rc;
589
590 sch = to_subchannel(cdev->dev.parent);
591 if (cdev->private->state != DEV_STATE_ONLINE)
592 return -EIO;
593 /* Adjust requested path mask to excluded varied off paths. */
594 if (lpm) {
595 lpm &= sch->opm;
596 if (lpm == 0)
597 return -EACCES;
598 }
599 rc = cio_tm_start_key(sch, tcw, lpm, key);
600 if (rc == 0)
601 cdev->private->intparm = intparm;
602 return rc;
603}
604EXPORT_SYMBOL(ccw_device_tm_start_key);
605
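
The path-mask rule in ccw_device_tm_start_key deserves a second look: a caller-supplied lpm of 0 means "no preference", while a non-zero lpm is intersected with the operational mask and the request is rejected with -EACCES if nothing usable remains. The same rule in isolation, as a user-space sketch with an invented helper name:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Restrict a requested path mask to the operationally allowed paths.
 * A request of 0 means "no preference, use whatever is allowed". */
static int effective_paths(uint8_t requested, uint8_t opm, uint8_t *out)
{
        if (requested) {
                requested &= opm;
                if (!requested)
                        return -EACCES; /* only varied-off paths requested */
        }
        *out = requested;
        return 0;
}

int main(void)
{
        uint8_t lpm;

        if (effective_paths(0x40, 0x80, &lpm) == -EACCES)
                printf("request rejected: no usable path\n");
        if (effective_paths(0xc0, 0x80, &lpm) == 0)
                printf("using path mask %02x\n", lpm);
        return 0;
}
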
606/**
607 * ccw_device_tm_start_timeout_key - perform start function
608 * @cdev: ccw device on which to perform the start function
609 * @tcw: transport-command word to be started
610 * @intparm: user defined parameter to be passed to the interrupt handler
611 * @lpm: mask of paths to use
612 * @key: storage key to use for storage access
613 * @expires: time span in jiffies after which to abort request
614 *
615 * Start the tcw on the given ccw device. Return zero on success, non-zero
616 * otherwise.
617 */
618int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
619 unsigned long intparm, u8 lpm, u8 key,
620 int expires)
621{
622 int ret;
623
624 ccw_device_set_timeout(cdev, expires);
625 ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
626 if (ret != 0)
627 ccw_device_set_timeout(cdev, 0);
628 return ret;
629}
630EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
631
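
ccw_device_tm_start_timeout_key is a small but easy-to-get-wrong wrapper: arm the timeout before submitting, and disarm it again if the submission itself fails, so a failed start can never fire a stale timer later. The same shape in a generic sketch; set_timeout, clear_timeout and submit are invented stand-ins, not cio functions.

#include <stdio.h>

static void set_timeout(int seconds)
{
        printf("timeout of %d s armed\n", seconds);
}

static void clear_timeout(void)
{
        printf("timeout disarmed\n");
}

static int submit(int fail)
{
        return fail ? -1 : 0;
}

static int submit_with_timeout(int fail, int seconds)
{
        int ret;

        set_timeout(seconds);           /* arm before submitting */
        ret = submit(fail);
        if (ret != 0)
                clear_timeout();        /* never leave a stale timer behind */
        return ret;
}

int main(void)
{
        submit_with_timeout(0, 10);     /* success: timer stays armed */
        submit_with_timeout(1, 10);     /* failure: timer is cleared */
        return 0;
}
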
632/**
633 * ccw_device_tm_start - perform start function
634 * @cdev: ccw device on which to perform the start function
635 * @tcw: transport-command word to be started
636 * @intparm: user defined parameter to be passed to the interrupt handler
637 * @lpm: mask of paths to use
638 *
639 * Start the tcw on the given ccw device. Return zero on success, non-zero
640 * otherwise.
641 */
642int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
643 unsigned long intparm, u8 lpm)
644{
645 return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
646 PAGE_DEFAULT_KEY);
647}
648EXPORT_SYMBOL(ccw_device_tm_start);
649
650/**
651 * ccw_device_tm_start_timeout - perform start function
652 * @cdev: ccw device on which to perform the start function
653 * @tcw: transport-command word to be started
654 * @intparm: user defined parameter to be passed to the interrupt handler
655 * @lpm: mask of paths to use
656 * @expires: time span in jiffies after which to abort request
657 *
658 * Start the tcw on the given ccw device. Return zero on success, non-zero
659 * otherwise.
660 */
661int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
662 unsigned long intparm, u8 lpm, int expires)
663{
664 return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
665 PAGE_DEFAULT_KEY, expires);
666}
667EXPORT_SYMBOL(ccw_device_tm_start_timeout);
668
669/**
670 * ccw_device_tm_intrg - perform interrogate function
671 * @cdev: ccw device on which to perform the interrogate function
672 *
673 * Perform an interrogate function on the given ccw device. Return zero on
674 * success, non-zero otherwise.
675 */
676int ccw_device_tm_intrg(struct ccw_device *cdev)
677{
678 struct subchannel *sch = to_subchannel(cdev->dev.parent);
679
680 if (cdev->private->state != DEV_STATE_ONLINE)
681 return -EIO;
682 if (!scsw_is_tm(&sch->schib.scsw) ||
 683	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
684 return -EINVAL;
685 return cio_tm_intrg(sch);
686}
687EXPORT_SYMBOL(ccw_device_tm_intrg);
688
571// FIXME: these have to go: 689// FIXME: these have to go:
572 690
573int 691int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 5cf7be008e98..86bc94eb607f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -28,13 +28,13 @@
28 * Helper function called from interrupt context to decide whether an 28 * Helper function called from interrupt context to decide whether an
29 * operation should be tried again. 29 * operation should be tried again.
30 */ 30 */
31static int __ccw_device_should_retry(struct scsw *scsw) 31static int __ccw_device_should_retry(union scsw *scsw)
32{ 32{
33 /* CC is only valid if start function bit is set. */ 33 /* CC is only valid if start function bit is set. */
34 if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) 34 if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
35 return 1; 35 return 1;
36 /* No more activity. For sense and set PGID we stubbornly try again. */ 36 /* No more activity. For sense and set PGID we stubbornly try again. */
37 if (!scsw->actl) 37 if (!scsw->cmd.actl)
38 return 1; 38 return 1;
39 return 0; 39 return 0;
40} 40}
@@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
125 125
126 sch = to_subchannel(cdev->dev.parent); 126 sch = to_subchannel(cdev->dev.parent);
127 irb = &cdev->private->irb; 127 irb = &cdev->private->irb;
128 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 128 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
129 /* Retry Sense PGID if requested. */ 129 /* Retry Sense PGID if requested. */
130 if (cdev->private->flags.intretry) { 130 if (cdev->private->flags.intretry) {
131 cdev->private->flags.intretry = 0; 131 cdev->private->flags.intretry = 0;
@@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
155 irb->ecw[6], irb->ecw[7]); 155 irb->ecw[6], irb->ecw[7]);
156 return -EAGAIN; 156 return -EAGAIN;
157 } 157 }
158 if (irb->scsw.cc == 3) { 158 if (irb->scsw.cmd.cc == 3) {
159 u8 lpm; 159 u8 lpm;
160 160
161 lpm = to_io_private(sch)->orb.lpm; 161 lpm = to_io_private(sch)->orb.cmd.lpm;
162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," 162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n", 163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid, 164 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
188 188
189 irb = (struct irb *) __LC_IRB; 189 irb = (struct irb *) __LC_IRB;
190 190
191 if (irb->scsw.stctl == 191 if (irb->scsw.cmd.stctl ==
192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
193 if (__ccw_device_should_retry(&irb->scsw)) { 193 if (__ccw_device_should_retry(&irb->scsw)) {
194 ret = __ccw_device_sense_pgid_start(cdev); 194 ret = __ccw_device_sense_pgid_start(cdev);
@@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
331 331
332 sch = to_subchannel(cdev->dev.parent); 332 sch = to_subchannel(cdev->dev.parent);
333 irb = &cdev->private->irb; 333 irb = &cdev->private->irb;
334 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 334 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
335 /* Retry Set PGID if requested. */ 335 /* Retry Set PGID if requested. */
336 if (cdev->private->flags.intretry) { 336 if (cdev->private->flags.intretry) {
337 cdev->private->flags.intretry = 0; 337 cdev->private->flags.intretry = 0;
@@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
355 irb->ecw[6], irb->ecw[7]); 355 irb->ecw[6], irb->ecw[7]);
356 return -EAGAIN; 356 return -EAGAIN;
357 } 357 }
358 if (irb->scsw.cc == 3) { 358 if (irb->scsw.cmd.cc == 3) {
359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," 359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
360 " lpm %02X, became 'not operational'\n", 360 " lpm %02X, became 'not operational'\n",
361 cdev->private->dev_id.devno, sch->schid.ssid, 361 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
376 376
377 sch = to_subchannel(cdev->dev.parent); 377 sch = to_subchannel(cdev->dev.parent);
378 irb = &cdev->private->irb; 378 irb = &cdev->private->irb;
379 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 379 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
380 /* Retry NOP if requested. */ 380 /* Retry NOP if requested. */
381 if (cdev->private->flags.intretry) { 381 if (cdev->private->flags.intretry) {
382 cdev->private->flags.intretry = 0; 382 cdev->private->flags.intretry = 0;
@@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
384 } 384 }
385 return -ETIME; 385 return -ETIME;
386 } 386 }
387 if (irb->scsw.cc == 3) { 387 if (irb->scsw.cmd.cc == 3) {
388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," 388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
389 " lpm %02X, became 'not operational'\n", 389 " lpm %02X, became 'not operational'\n",
390 cdev->private->dev_id.devno, sch->schid.ssid, 390 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
438 438
439 irb = (struct irb *) __LC_IRB; 439 irb = (struct irb *) __LC_IRB;
440 440
441 if (irb->scsw.stctl == 441 if (irb->scsw.cmd.stctl ==
442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
443 if (__ccw_device_should_retry(&irb->scsw)) 443 if (__ccw_device_should_retry(&irb->scsw))
444 __ccw_device_verify_start(cdev); 444 __ccw_device_verify_start(cdev);
@@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
544 544
545 irb = (struct irb *) __LC_IRB; 545 irb = (struct irb *) __LC_IRB;
546 546
547 if (irb->scsw.stctl == 547 if (irb->scsw.cmd.stctl ==
548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
549 if (__ccw_device_should_retry(&irb->scsw)) 549 if (__ccw_device_should_retry(&irb->scsw))
550 __ccw_device_disband_start(cdev); 550 __ccw_device_disband_start(cdev);
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 4a38993000f2..1b03c5423be2 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -29,9 +29,11 @@
29static void 29static void
30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
31{ 31{
32 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 32 char dbf_text[15];
33 SCHN_STAT_CHN_CTRL_CHK | 33
34 SCHN_STAT_INTF_CTRL_CHK))) 34 if (!scsw_is_valid_cstat(&irb->scsw) ||
35 !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
36 SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
35 return; 37 return;
36 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " 38 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
37 "received" 39 "received"
@@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
39 ": %02X sch_stat : %02X\n", 41 ": %02X sch_stat : %02X\n",
40 cdev->private->dev_id.devno, cdev->private->schid.ssid, 42 cdev->private->dev_id.devno, cdev->private->schid.ssid,
41 cdev->private->schid.sch_no, 43 cdev->private->schid.sch_no,
42 irb->scsw.dstat, irb->scsw.cstat); 44 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
43 45 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
44 if (irb->scsw.cc != 3) { 46 CIO_TRACE_EVENT(0, dbf_text);
45 char dbf_text[15]; 47 CIO_HEX_EVENT(0, irb, sizeof(struct irb));
46
47 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
48 CIO_TRACE_EVENT(0, dbf_text);
49 CIO_HEX_EVENT(0, irb, sizeof (struct irb));
50 }
51} 48}
52 49
53/* 50/*
@@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
81 * are conditions that have to be met for the extended control 78
82 * bit to have meaning. Sick. 79 * bit to have meaning. Sick.
83 */ 80 */
84 cdev->private->irb.scsw.ectl = 0; 81 cdev->private->irb.scsw.cmd.ectl = 0;
85 if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && 82 if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
86 !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) 83 !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
87 cdev->private->irb.scsw.ectl = irb->scsw.ectl; 84 cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
88 /* Check if extended control word is valid. */ 85 /* Check if extended control word is valid. */
89 if (!cdev->private->irb.scsw.ectl) 86 if (!cdev->private->irb.scsw.cmd.ectl)
90 return; 87 return;
91 /* Copy concurrent sense / model dependent information. */ 88 /* Copy concurrent sense / model dependent information. */
92 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); 89 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
@@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
98static int 95static int
99ccw_device_accumulate_esw_valid(struct irb *irb) 96ccw_device_accumulate_esw_valid(struct irb *irb)
100{ 97{
101 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) 98 if (!irb->scsw.cmd.eswf &&
99 (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
102 return 0; 100 return 0;
103 if (irb->scsw.stctl == 101 if (irb->scsw.cmd.stctl ==
104 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && 102 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
105 !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) 103 !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
106 return 0; 104 return 0;
107 return 1; 105 return 1;
108} 106}
@@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
125 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; 123 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
126 124
127 /* Copy subchannel logout information if esw is of format 0. */ 125 /* Copy subchannel logout information if esw is of format 0. */
128 if (irb->scsw.eswf) { 126 if (irb->scsw.cmd.eswf) {
129 cdev_sublog = &cdev_irb->esw.esw0.sublog; 127 cdev_sublog = &cdev_irb->esw.esw0.sublog;
130 sublog = &irb->esw.esw0.sublog; 128 sublog = &irb->esw.esw0.sublog;
131 /* Copy extended status flags. */ 129 /* Copy extended status flags. */
@@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
134 * Copy fields that have a meaning for channel data check 132 * Copy fields that have a meaning for channel data check
135 * channel control check and interface control check. 133 * channel control check and interface control check.
136 */ 134 */
137 if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 135 if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
138 SCHN_STAT_CHN_CTRL_CHK | 136 SCHN_STAT_CHN_CTRL_CHK |
139 SCHN_STAT_INTF_CTRL_CHK)) { 137 SCHN_STAT_INTF_CTRL_CHK)) {
140 /* Copy ancillary report bit. */ 138 /* Copy ancillary report bit. */
@@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
155 /* Copy i/o-error alert. */ 153 /* Copy i/o-error alert. */
156 cdev_sublog->ioerr = sublog->ioerr; 154 cdev_sublog->ioerr = sublog->ioerr;
157 /* Copy channel path timeout bit. */ 155 /* Copy channel path timeout bit. */
158 if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) 156 if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
159 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; 157 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
160 /* Copy failing storage address validity flag. */ 158 /* Copy failing storage address validity flag. */
161 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; 159 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
@@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
200 * If not, the remaining bits have no meaning and we must ignore them. 198
201 * The esw is not meaningful either... 199
202 */ 200 */
203 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) 201 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
204 return; 202 return;
205 203
206 /* Check for channel checks and interface control checks. */ 204 /* Check for channel checks and interface control checks. */
207 ccw_device_msg_control_check(cdev, irb); 205 ccw_device_msg_control_check(cdev, irb);
208 206
209 /* Check for path not operational. */ 207 /* Check for path not operational. */
210 if (irb->scsw.pno && irb->scsw.fctl != 0 && 208 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
211 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
212 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
213 ccw_device_path_notoper(cdev); 209 ccw_device_path_notoper(cdev);
214 210 /* No irb accumulation for transport mode irbs. */
211 if (scsw_is_tm(&irb->scsw)) {
212 memcpy(&cdev->private->irb, irb, sizeof(struct irb));
213 return;
214 }
215 /* 215 /*
216 * Don't accumulate unsolicited interrupts. 216 * Don't accumulate unsolicited interrupts.
217 */ 217 */
218 if ((irb->scsw.stctl == 218 if (!scsw_is_solicited(&irb->scsw))
219 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
220 (!irb->scsw.cc))
221 return; 219 return;
222 220
223 cdev_irb = &cdev->private->irb; 221 cdev_irb = &cdev->private->irb;
@@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
227 * status at the subchannel has been cleared and we must not pass 225 * status at the subchannel has been cleared and we must not pass
228 * intermediate accumulated status to the device driver. 226 * intermediate accumulated status to the device driver.
229 */ 227 */
230 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) 228 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
231 memset(&cdev->private->irb, 0, sizeof(struct irb)); 229 memset(&cdev->private->irb, 0, sizeof(struct irb));
232 230
233 /* Copy bits which are valid only for the start function. */ 231 /* Copy bits which are valid only for the start function. */
234 if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { 232 if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
235 /* Copy key. */ 233 /* Copy key. */
236 cdev_irb->scsw.key = irb->scsw.key; 234 cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
237 /* Copy suspend control bit. */ 235 /* Copy suspend control bit. */
238 cdev_irb->scsw.sctl = irb->scsw.sctl; 236 cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
239 /* Accumulate deferred condition code. */ 237 /* Accumulate deferred condition code. */
240 cdev_irb->scsw.cc |= irb->scsw.cc; 238 cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
241 /* Copy ccw format bit. */ 239 /* Copy ccw format bit. */
242 cdev_irb->scsw.fmt = irb->scsw.fmt; 240 cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
243 /* Copy prefetch bit. */ 241 /* Copy prefetch bit. */
244 cdev_irb->scsw.pfch = irb->scsw.pfch; 242 cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
245 /* Copy initial-status-interruption-control. */ 243 /* Copy initial-status-interruption-control. */
246 cdev_irb->scsw.isic = irb->scsw.isic; 244 cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
247 /* Copy address limit checking control. */ 245 /* Copy address limit checking control. */
248 cdev_irb->scsw.alcc = irb->scsw.alcc; 246 cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
249 /* Copy suppress suspend bit. */ 247 /* Copy suppress suspend bit. */
250 cdev_irb->scsw.ssi = irb->scsw.ssi; 248 cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
251 } 249 }
252 250
253 /* Take care of the extended control bit and extended control word. */ 251 /* Take care of the extended control bit and extended control word. */
254 ccw_device_accumulate_ecw(cdev, irb); 252 ccw_device_accumulate_ecw(cdev, irb);
255 253
256 /* Accumulate function control. */ 254 /* Accumulate function control. */
257 cdev_irb->scsw.fctl |= irb->scsw.fctl; 255 cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
258 /* Copy activity control. */ 256 /* Copy activity control. */
259 cdev_irb->scsw.actl= irb->scsw.actl; 257 cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
260 /* Accumulate status control. */ 258 /* Accumulate status control. */
261 cdev_irb->scsw.stctl |= irb->scsw.stctl; 259 cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
262 /* 260 /*
263 * Copy ccw address if it is valid. This is a bit simplified 261 * Copy ccw address if it is valid. This is a bit simplified
264 * but should be close enough for all practical purposes. 262 * but should be close enough for all practical purposes.
265 */ 263 */
266 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || 264 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
267 ((irb->scsw.stctl == 265 ((irb->scsw.cmd.stctl ==
268 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && 266 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
269 (irb->scsw.actl & SCSW_ACTL_DEVACT) && 267 (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
270 (irb->scsw.actl & SCSW_ACTL_SCHACT)) || 268 (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
271 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) 269 (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
272 cdev_irb->scsw.cpa = irb->scsw.cpa; 270 cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
273 /* Accumulate device status, but not the device busy flag. */ 271 /* Accumulate device status, but not the device busy flag. */
274 cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; 272 cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
275 /* dstat is not always valid. */ 273 /* dstat is not always valid. */
276 if (irb->scsw.stctl & 274 if (irb->scsw.cmd.stctl &
277 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS 275 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
278 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) 276 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
279 cdev_irb->scsw.dstat |= irb->scsw.dstat; 277 cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
280 /* Accumulate subchannel status. */ 278 /* Accumulate subchannel status. */
281 cdev_irb->scsw.cstat |= irb->scsw.cstat; 279 cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
282 /* Copy residual count if it is valid. */ 280 /* Copy residual count if it is valid. */
283 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && 281 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
284 (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) 282 (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
285 cdev_irb->scsw.count = irb->scsw.count; 283 == 0)
284 cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
286 285
287 /* Take care of bits in the extended status word. */ 286 /* Take care of bits in the extended status word. */
288 ccw_device_accumulate_esw(cdev, irb); 287 ccw_device_accumulate_esw(cdev, irb);
@@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
299 * sense facility available/supported when enabling the 298 * sense facility available/supported when enabling the
300 * concurrent sense facility. 299 * concurrent sense facility.
301 */ 300 */
302 if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && 301 if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
303 !(cdev_irb->esw.esw0.erw.cons)) 302 !(cdev_irb->esw.esw0.erw.cons))
304 cdev->private->flags.dosense = 1; 303 cdev->private->flags.dosense = 1;
305} 304}
@@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
317 sch = to_subchannel(cdev->dev.parent); 316 sch = to_subchannel(cdev->dev.parent);
318 317
319 /* A sense is required, can we do it now ? */ 318 /* A sense is required, can we do it now ? */
320 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) 319 if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
321 /* 320 /*
322 * we received an Unit Check but we have no final 321 * we received an Unit Check but we have no final
323 * status yet, therefore we must delay the SENSE 322 * status yet, therefore we must delay the SENSE
@@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
355 * If not, the remaining bit have no meaning and we must ignore them. 354 * If not, the remaining bit have no meaning and we must ignore them.
356 * The esw is not meaningful as well... 355 * The esw is not meaningful as well...
357 */ 356 */
358 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) 357 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
359 return; 358 return;
360 359
361 /* Check for channel checks and interface control checks. */ 360 /* Check for channel checks and interface control checks. */
362 ccw_device_msg_control_check(cdev, irb); 361 ccw_device_msg_control_check(cdev, irb);
363 362
364 /* Check for path not operational. */ 363 /* Check for path not operational. */
365 if (irb->scsw.pno && irb->scsw.fctl != 0 && 364 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
366 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
367 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
368 ccw_device_path_notoper(cdev); 365 ccw_device_path_notoper(cdev);
369 366
370 if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && 367 if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
371 (irb->scsw.dstat & DEV_STAT_CHN_END)) { 368 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
372 cdev->private->irb.esw.esw0.erw.cons = 1; 369 cdev->private->irb.esw.esw0.erw.cons = 1;
373 cdev->private->flags.dosense = 0; 370 cdev->private->flags.dosense = 0;
374 } 371 }
@@ -386,11 +383,11 @@ int
386ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) 383ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
387{ 384{
388 ccw_device_accumulate_irb(cdev, irb); 385 ccw_device_accumulate_irb(cdev, irb);
389 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) 386 if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
390 return -EBUSY; 387 return -EBUSY;
391 /* Check for basic sense. */ 388 /* Check for basic sense. */
392 if (cdev->private->flags.dosense && 389 if (cdev->private->flags.dosense &&
393 !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { 390 !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
394 cdev->private->irb.esw.esw0.erw.cons = 1; 391 cdev->private->irb.esw.esw0.erw.cons = 1;
395 cdev->private->flags.dosense = 0; 392 cdev->private->flags.dosense = 0;
396 return 0; 393 return 0;
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
new file mode 100644
index 000000000000..61677dfbdc9b
--- /dev/null
+++ b/drivers/s390/cio/fcx.c
@@ -0,0 +1,350 @@
1/*
2 * Functions for assembling fcx enabled I/O control blocks.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <asm/fcx.h>
15#include "cio.h"
16
17/**
18 * tcw_get_intrg - return pointer to associated interrogate tcw
19 * @tcw: pointer to the original tcw
20 *
21 * Return a pointer to the interrogate tcw associated with the specified tcw
22 * or %NULL if there is no associated interrogate tcw.
23 */
24struct tcw *tcw_get_intrg(struct tcw *tcw)
25{
26 return (struct tcw *) ((addr_t) tcw->intrg);
27}
28EXPORT_SYMBOL(tcw_get_intrg);
29
30/**
31 * tcw_get_data - return pointer to input/output data associated with tcw
32 * @tcw: pointer to the tcw
33 *
34 * Return the input or output data address specified in the tcw depending
35 * on whether the r-bit or the w-bit is set. If neither bit is set, return
36 * %NULL.
37 */
38void *tcw_get_data(struct tcw *tcw)
39{
40 if (tcw->r)
41 return (void *) ((addr_t) tcw->input);
42 if (tcw->w)
43 return (void *) ((addr_t) tcw->output);
44 return NULL;
45}
46EXPORT_SYMBOL(tcw_get_data);
47
48/**
49 * tcw_get_tccb - return pointer to tccb associated with tcw
50 * @tcw: pointer to the tcw
51 *
52 * Return pointer to the tccb associated with this tcw.
53 */
54struct tccb *tcw_get_tccb(struct tcw *tcw)
55{
56 return (struct tccb *) ((addr_t) tcw->tccb);
57}
58EXPORT_SYMBOL(tcw_get_tccb);
59
60/**
61 * tcw_get_tsb - return pointer to tsb associated with tcw
62 * @tcw: pointer to the tcw
63 *
64 * Return pointer to the tsb associated with this tcw.
65 */
66struct tsb *tcw_get_tsb(struct tcw *tcw)
67{
68 return (struct tsb *) ((addr_t) tcw->tsb);
69}
70EXPORT_SYMBOL(tcw_get_tsb);
71
72/**
73 * tcw_init - initialize tcw data structure
74 * @tcw: pointer to the tcw to be initialized
75 * @r: initial value of the r-bit
76 * @w: initial value of the w-bit
77 *
78 * Initialize all fields of the specified tcw data structure with zero and
79 * fill in the format, flags, r and w fields.
80 */
81void tcw_init(struct tcw *tcw, int r, int w)
82{
83 memset(tcw, 0, sizeof(struct tcw));
84 tcw->format = TCW_FORMAT_DEFAULT;
85 tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
86 if (r)
87 tcw->r = 1;
88 if (w)
89 tcw->w = 1;
90}
91EXPORT_SYMBOL(tcw_init);
92
93static inline size_t tca_size(struct tccb *tccb)
94{
95 return tccb->tcah.tcal - 12;
96}
97
98static u32 calc_dcw_count(struct tccb *tccb)
99{
100 int offset;
101 struct dcw *dcw;
102 u32 count = 0;
103 size_t size;
104
105 size = tca_size(tccb);
106 for (offset = 0; offset < size;) {
107 dcw = (struct dcw *) &tccb->tca[offset];
108 count += dcw->count;
109 if (!(dcw->flags & DCW_FLAGS_CC))
110 break;
111 offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
112 }
113 return count;
114}
115
116static u32 calc_cbc_size(struct tidaw *tidaw, int num)
117{
118 int i;
119 u32 cbc_data;
120 u32 cbc_count = 0;
121 u64 data_count = 0;
122
123 for (i = 0; i < num; i++) {
124 if (tidaw[i].flags & TIDAW_FLAGS_LAST)
125 break;
126 /* TODO: find out if padding applies to total of data
127 * transferred or data transferred by this tidaw. Assumption:
128 * applies to total. */
129 data_count += tidaw[i].count;
130 if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
131 cbc_data = 4 + ALIGN(data_count, 4) - data_count;
132 cbc_count += cbc_data;
133 data_count += cbc_data;
134 }
135 }
136 return cbc_count;
137}
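
The interplay of data_count and cbc_count above is easier to follow with concrete numbers. A worked example with purely illustrative values (not taken from the patch):

/*
 * Worked example for the check-byte-count padding above (illustrative only):
 *   tidaw[0]: count = 21, TIDAW_FLAGS_INSERT_CBC set
 *     data_count = 21
 *     cbc_data   = 4 + ALIGN(21, 4) - 21 = 4 + 24 - 21 = 7
 *     cbc_count  = 7,  data_count = 28
 *   tidaw[1]: count = 20, TIDAW_FLAGS_INSERT_CBC set
 *     data_count = 48
 *     cbc_data   = 4 + ALIGN(48, 4) - 48 = 4
 *     cbc_count  = 11, data_count = 52
 */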
138
139/**
140 * tcw_finalize - finalize tcw length fields and tidaw list
141 * @tcw: pointer to the tcw
142 * @num_tidaws: the number of tidaws used to address input/output data or zero
143 * if no tida is used
144 *
145 * Calculate the input-/output-count and tccbl field in the tcw, add a
 146 * tcat to the tccb and terminate the data tidaw list if used.
147 *
148 * Note: in case input- or output-tida is used, the tidaw-list must be stored
149 * in contiguous storage (no ttic). The tcal field in the tccb must be
150 * up-to-date.
151 */
152void tcw_finalize(struct tcw *tcw, int num_tidaws)
153{
154 struct tidaw *tidaw;
155 struct tccb *tccb;
156 struct tccb_tcat *tcat;
157 u32 count;
158
159 /* Terminate tidaw list. */
160 tidaw = tcw_get_data(tcw);
161 if (num_tidaws > 0)
162 tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
163 /* Add tcat to tccb. */
164 tccb = tcw_get_tccb(tcw);
165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
 166	memset(tcat, 0, sizeof(*tcat));
167 /* Calculate tcw input/output count and tcat transport count. */
168 count = calc_dcw_count(tccb);
169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
170 count += calc_cbc_size(tidaw, num_tidaws);
171 if (tcw->r)
172 tcw->input_count = count;
173 else if (tcw->w)
174 tcw->output_count = count;
175 tcat->count = ALIGN(count, 4) + 4;
176 /* Calculate tccbl. */
177 tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
178 sizeof(struct tccb_tcat) - 20) >> 2;
179}
180EXPORT_SYMBOL(tcw_finalize);
181
182/**
183 * tcw_set_intrg - set the interrogate tcw address of a tcw
184 * @tcw: the tcw address
185 * @intrg_tcw: the address of the interrogate tcw
186 *
187 * Set the address of the interrogate tcw in the specified tcw.
188 */
189void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
190{
191 tcw->intrg = (u32) ((addr_t) intrg_tcw);
192}
193EXPORT_SYMBOL(tcw_set_intrg);
194
195/**
196 * tcw_set_data - set data address and tida flag of a tcw
197 * @tcw: the tcw address
198 * @data: the data address
 199 * @use_tidal: zero if the data address specifies a contiguous block of data,
 200 * non-zero if it specifies a list of tidaws.
201 *
202 * Set the input/output data address of a tcw (depending on the value of the
203 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
204 * is set as well.
205 */
206void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
207{
208 if (tcw->r) {
209 tcw->input = (u64) ((addr_t) data);
210 if (use_tidal)
211 tcw->flags |= TCW_FLAGS_INPUT_TIDA;
212 } else if (tcw->w) {
213 tcw->output = (u64) ((addr_t) data);
214 if (use_tidal)
215 tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
216 }
217}
218EXPORT_SYMBOL(tcw_set_data);
219
220/**
221 * tcw_set_tccb - set tccb address of a tcw
222 * @tcw: the tcw address
223 * @tccb: the tccb address
224 *
225 * Set the address of the tccb in the specified tcw.
226 */
227void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
228{
229 tcw->tccb = (u64) ((addr_t) tccb);
230}
231EXPORT_SYMBOL(tcw_set_tccb);
232
233/**
234 * tcw_set_tsb - set tsb address of a tcw
235 * @tcw: the tcw address
236 * @tsb: the tsb address
237 *
238 * Set the address of the tsb in the specified tcw.
239 */
240void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
241{
242 tcw->tsb = (u64) ((addr_t) tsb);
243}
244EXPORT_SYMBOL(tcw_set_tsb);
245
246/**
247 * tccb_init - initialize tccb
248 * @tccb: the tccb address
249 * @size: the maximum size of the tccb
 250 * @sac: the service-action-code to be used
251 *
252 * Initialize the header of the specified tccb by resetting all values to zero
253 * and filling in defaults for format, sac and initial tcal fields.
254 */
255void tccb_init(struct tccb *tccb, size_t size, u32 sac)
256{
257 memset(tccb, 0, size);
258 tccb->tcah.format = TCCB_FORMAT_DEFAULT;
259 tccb->tcah.sac = sac;
260 tccb->tcah.tcal = 12;
261}
262EXPORT_SYMBOL(tccb_init);
263
264/**
265 * tsb_init - initialize tsb
266 * @tsb: the tsb address
267 *
268 * Initialize the specified tsb by resetting all values to zero.
269 */
270void tsb_init(struct tsb *tsb)
271{
 272	memset(tsb, 0, sizeof(*tsb));
273}
274EXPORT_SYMBOL(tsb_init);
275
276/**
277 * tccb_add_dcw - add a dcw to the tccb
278 * @tccb: the tccb address
279 * @tccb_size: the maximum tccb size
280 * @cmd: the dcw command
281 * @flags: flags for the dcw
282 * @cd: pointer to control data for this dcw or NULL if none is required
283 * @cd_count: number of control data bytes for this dcw
284 * @count: number of data bytes for this dcw
285 *
286 * Add a new dcw to the specified tccb by writing the dcw information specified
287 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
288 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
289 * would exceed the available space as defined by @tccb_size.
290 *
 291 * Note: the tcal field of the tccb header will be updated to reflect added
292 * content.
293 */
294struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
295 void *cd, u8 cd_count, u32 count)
296{
297 struct dcw *dcw;
298 int size;
299 int tca_offset;
300
301 /* Check for space. */
302 tca_offset = tca_size(tccb);
303 size = ALIGN(sizeof(struct dcw) + cd_count, 4);
304 if (sizeof(struct tccb_tcah) + tca_offset + size +
305 sizeof(struct tccb_tcat) > tccb_size)
306 return ERR_PTR(-ENOSPC);
307 /* Add dcw to tca. */
308 dcw = (struct dcw *) &tccb->tca[tca_offset];
309 memset(dcw, 0, size);
310 dcw->cmd = cmd;
311 dcw->flags = flags;
312 dcw->count = count;
313 dcw->cd_count = cd_count;
314 if (cd)
315 memcpy(&dcw->cd[0], cd, cd_count);
316 tccb->tcah.tcal += size;
317 return dcw;
318}
319EXPORT_SYMBOL(tccb_add_dcw);
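
Since tccb_add_dcw() reports overflow via ERR_PTR(), callers are expected to check the result with IS_ERR(). A minimal, hedged sketch (not part of the patch; the command code 0x02 is only a placeholder):

	struct dcw *dcw;

	dcw = tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x02 /* placeholder cmd */,
			   DCW_FLAGS_CC, NULL, 0, 4096);
	if (IS_ERR(dcw))
		return PTR_ERR(dcw);	/* -ENOSPC: dcw would not fit into the tca */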
320
321/**
322 * tcw_add_tidaw - add a tidaw to a tcw
323 * @tcw: the tcw address
324 * @num_tidaws: the current number of tidaws
325 * @flags: flags for the new tidaw
326 * @addr: address value for the new tidaw
327 * @count: count value for the new tidaw
328 *
329 * Add a new tidaw to the input/output data tidaw-list of the specified tcw
330 * (depending on the value of the r-flag and w-flag) and return a pointer to
331 * the new tidaw.
332 *
333 * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
334 * must ensure that there is enough space for the new tidaw. The last-tidaw
335 * flag for the last tidaw in the list will be set by tcw_finalize.
336 */
337struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
338 void *addr, u32 count)
339{
340 struct tidaw *tidaw;
341
342 /* Add tidaw to tidaw-list. */
343 tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
344 memset(tidaw, 0, sizeof(struct tidaw));
345 tidaw->flags = flags;
346 tidaw->count = count;
347 tidaw->addr = (u64) ((addr_t) addr);
348 return tidaw;
349}
350EXPORT_SYMBOL(tcw_add_tidaw);
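
Taken together, the helpers above support the following assembly pattern. This is a hedged sketch only, not part of the patch: buffer allocation, alignment and placement below 2G are assumed to be handled by the caller, and the command code and counts are placeholders.

/* Sketch: build a read tcw addressing its data through a two-entry tidal. */
static int example_build_tcw(struct tcw *tcw, struct tccb *tccb,
			     struct tsb *tsb, struct tidaw *tidal,
			     void *buf1, void *buf2)
{
	struct dcw *dcw;

	tcw_init(tcw, 1, 0);			/* r=1: read operation */
	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
	tsb_init(tsb);
	tcw_set_tccb(tcw, tccb);
	tcw_set_tsb(tcw, tsb);
	tcw_set_data(tcw, tidal, 1);		/* data addressed via tidal */

	dcw = tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x02 /* placeholder cmd */,
			   0, NULL, 0, 8192);
	if (IS_ERR(dcw))
		return PTR_ERR(dcw);

	tcw_add_tidaw(tcw, 0, 0, buf1, 4096);
	tcw_add_tidaw(tcw, 1, 0, buf2, 4096);
	tcw_finalize(tcw, 2);			/* sets last-tidaw flag and counts */
	return 0;
}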
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 144466ab8c15..528065cb5021 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -8,7 +8,7 @@
8#ifndef S390_IDSET_H 8#ifndef S390_IDSET_H
9#define S390_IDSET_H S390_IDSET_H 9#define S390_IDSET_H S390_IDSET_H
10 10
11#include "schid.h" 11#include <asm/schid.h>
12 12
13struct idset; 13struct idset;
14 14
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 8c613160bfce..3f8f1cf69c76 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,12 +1,12 @@
1#ifndef S390_IO_SCH_H 1#ifndef S390_IO_SCH_H
2#define S390_IO_SCH_H 2#define S390_IO_SCH_H
3 3
4#include "schid.h" 4#include <asm/schid.h>
5 5
6/* 6/*
7 * operation request block 7 * command-mode operation request block
8 */ 8 */
9struct orb { 9struct cmd_orb {
10 u32 intparm; /* interruption parameter */ 10 u32 intparm; /* interruption parameter */
11 u32 key : 4; /* flags, like key, suspend control, etc. */ 11 u32 key : 4; /* flags, like key, suspend control, etc. */
12 u32 spnd : 1; /* suspend control */ 12 u32 spnd : 1; /* suspend control */
@@ -28,8 +28,36 @@ struct orb {
28 u32 cpa; /* channel program address */ 28 u32 cpa; /* channel program address */
29} __attribute__ ((packed, aligned(4))); 29} __attribute__ ((packed, aligned(4)));
30 30
31/*
32 * transport-mode operation request block
33 */
34struct tm_orb {
35 u32 intparm;
36 u32 key:4;
37 u32 :9;
38 u32 b:1;
39 u32 :2;
40 u32 lpm:8;
41 u32 :7;
42 u32 x:1;
43 u32 tcw;
44 u32 prio:8;
45 u32 :8;
46 u32 rsvpgm:8;
47 u32 :8;
48 u32 :32;
49 u32 :32;
50 u32 :32;
51 u32 :32;
52} __attribute__ ((packed, aligned(4)));
53
54union orb {
55 struct cmd_orb cmd;
56 struct tm_orb tm;
57} __attribute__ ((packed, aligned(4)));
58
31struct io_subchannel_private { 59struct io_subchannel_private {
32 struct orb orb; /* operation request block */ 60 union orb orb; /* operation request block */
33 struct ccw1 sense_ccw; /* static ccw for sense command */ 61 struct ccw1 sense_ccw; /* static ccw for sense command */
34} __attribute__ ((aligned(8))); 62} __attribute__ ((aligned(8)));
35 63
@@ -95,16 +123,18 @@ struct ccw_device_private {
95 void *cmb_wait; /* deferred cmb enable/disable */ 123 void *cmb_wait; /* deferred cmb enable/disable */
96}; 124};
97 125
98static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) 126static inline int ssch(struct subchannel_id schid, volatile union orb *addr)
99{ 127{
100 register struct subchannel_id reg1 asm("1") = schid; 128 register struct subchannel_id reg1 asm("1") = schid;
101 int ccode; 129 int ccode = -EIO;
102 130
103 asm volatile( 131 asm volatile(
104 " ssch 0(%2)\n" 132 " ssch 0(%2)\n"
105 " ipm %0\n" 133 "0: ipm %0\n"
106 " srl %0,28" 134 " srl %0,28\n"
107 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 135 "1:\n"
136 EX_TABLE(0b, 1b)
137 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
108 return ccode; 138 return ccode;
109} 139}
110 140
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 652ea3625f9d..9fa2ac13ac85 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -2,7 +2,7 @@
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include <asm/chpid.h> 4#include <asm/chpid.h>
5#include "schid.h" 5#include <asm/schid.h>
6 6
7/* 7/*
8 * TPI info structure 8 * TPI info structure
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c
new file mode 100644
index 000000000000..c592087be0f1
--- /dev/null
+++ b/drivers/s390/cio/isc.c
@@ -0,0 +1,68 @@
1/*
2 * Functions for registration of I/O interruption subclasses on s390.
3 *
4 * Copyright IBM Corp. 2008
5 * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/spinlock.h>
9#include <linux/module.h>
10#include <asm/isc.h>
11
12static unsigned int isc_refs[MAX_ISC + 1];
13static DEFINE_SPINLOCK(isc_ref_lock);
14
15
16/**
17 * isc_register - register an I/O interruption subclass.
18 * @isc: I/O interruption subclass to register
19 *
20 * The number of users for @isc is increased. If this is the first user to
21 * register @isc, the corresponding I/O interruption subclass mask is enabled.
22 *
23 * Context:
24 * This function must not be called in interrupt context.
25 */
26void isc_register(unsigned int isc)
27{
28 if (isc > MAX_ISC) {
29 WARN_ON(1);
30 return;
31 }
32
33 spin_lock(&isc_ref_lock);
34 if (isc_refs[isc] == 0)
35 ctl_set_bit(6, 31 - isc);
36 isc_refs[isc]++;
37 spin_unlock(&isc_ref_lock);
38}
39EXPORT_SYMBOL_GPL(isc_register);
40
41/**
42 * isc_unregister - unregister an I/O interruption subclass.
43 * @isc: I/O interruption subclass to unregister
44 *
45 * The number of users for @isc is decreased. If this is the last user to
46 * unregister @isc, the corresponding I/O interruption subclass mask is
47 * disabled.
48 * Note: This function must not be called if isc_register() hasn't been called
49 * before by the driver for @isc.
50 *
51 * Context:
52 * This function must not be called in interrupt context.
53 */
54void isc_unregister(unsigned int isc)
55{
56 spin_lock(&isc_ref_lock);
57 /* check for misuse */
58 if (isc > MAX_ISC || isc_refs[isc] == 0) {
59 WARN_ON(1);
60 goto out_unlock;
61 }
62 if (isc_refs[isc] == 1)
63 ctl_clear_bit(6, 31 - isc);
64 isc_refs[isc]--;
65out_unlock:
66 spin_unlock(&isc_ref_lock);
67}
68EXPORT_SYMBOL_GPL(isc_unregister);
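
A hedged usage sketch, not part of the patch: drivers bracket their use of an I/O interruption subclass with these two calls, typically in their module init and exit paths. QDIO_AIRQ_ISC is used here only as an example constant (see the qdio changes later in this diff).

static int __init example_driver_init(void)
{
	isc_register(QDIO_AIRQ_ISC);	/* first user enables the ISC mask */
	/* ... set up adapter interrupts and devices ... */
	return 0;
}

static void __exit example_driver_exit(void)
{
	/* ... tear down devices ... */
	isc_unregister(QDIO_AIRQ_ISC);	/* last user disables the ISC mask */
}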
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
new file mode 100644
index 000000000000..17da9ab932ed
--- /dev/null
+++ b/drivers/s390/cio/itcw.c
@@ -0,0 +1,327 @@
1/*
2 * Functions for incremental construction of fcx enabled I/O control blocks.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <asm/fcx.h>
15#include <asm/itcw.h>
16
17/**
18 * struct itcw - incremental tcw helper data type
19 *
20 * This structure serves as a handle for the incremental construction of a
21 * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
22 * tcw and associated data. The data structures are contained inside a single
23 * contiguous buffer provided by the user.
24 *
25 * The itcw construction functions take care of overall data integrity:
26 * - reset unused fields to zero
27 * - fill in required pointers
28 * - ensure required alignment for data structures
 29 * - prevent data structures from crossing a 4k-byte boundary where required
30 * - calculate tccb-related length fields
31 * - optionally provide ready-made interrogate tcw and associated structures
32 *
33 * Restrictions apply to the itcws created with these construction functions:
34 * - tida only supported for data address, not for tccb
35 * - only contiguous tidaw-lists (no ttic)
36 * - total number of bytes required per itcw may not exceed 4k bytes
 37 * - either read or write operation (r=0 together with w=0 is not supported)
38 *
39 * Example:
40 * struct itcw *itcw;
41 * void *buffer;
42 * size_t size;
43 *
44 * size = itcw_calc_size(1, 2, 0);
45 * buffer = kmalloc(size, GFP_DMA);
46 * if (!buffer)
47 * return -ENOMEM;
48 * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
49 * if (IS_ERR(itcw))
 50 * 	return PTR_ERR(itcw);
51 * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
52 * itcw_add_tidaw(itcw, 0, 0x30000, 20);
53 * itcw_add_tidaw(itcw, 0, 0x40000, 52);
54 * itcw_finalize(itcw);
55 *
56 */
57struct itcw {
58 struct tcw *tcw;
59 struct tcw *intrg_tcw;
60 int num_tidaws;
61 int max_tidaws;
62 int intrg_num_tidaws;
63 int intrg_max_tidaws;
64};
65
66/**
67 * itcw_get_tcw - return pointer to tcw associated with the itcw
68 * @itcw: address of the itcw
69 *
70 * Return pointer to the tcw associated with the itcw.
71 */
72struct tcw *itcw_get_tcw(struct itcw *itcw)
73{
74 return itcw->tcw;
75}
76EXPORT_SYMBOL(itcw_get_tcw);
77
78/**
79 * itcw_calc_size - return the size of an itcw with the given parameters
80 * @intrg: if non-zero, add an interrogate tcw
81 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
82 * if no tida is to be used.
83 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
84 * by the interrogate tcw, if specified
85 *
86 * Calculate and return the number of bytes required to hold an itcw with the
87 * given parameters and assuming tccbs with maximum size.
88 *
89 * Note that the resulting size also contains bytes needed for alignment
90 * padding as well as padding to ensure that data structures don't cross a
91 * 4k-boundary where required.
92 */
93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
94{
95 size_t len;
96
97 /* Main data. */
98 len = sizeof(struct itcw);
99 len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
100 /* TSB */ sizeof(struct tsb) +
101 /* TIDAL */ max_tidaws * sizeof(struct tidaw);
102 /* Interrogate data. */
103 if (intrg) {
104 len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
105 /* TSB */ sizeof(struct tsb) +
106 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
107 }
108 /* Maximum required alignment padding. */
109 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
110 /* Maximum padding for structures that may not cross 4k boundary. */
111 if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
112 len += max(max_tidaws, intrg_max_tidaws) *
113 sizeof(struct tidaw) - 1;
114 return len;
115}
116EXPORT_SYMBOL(itcw_calc_size);
117
 118#define CROSS4K(x, l)	(((x) & ~4095) != (((x) + (l)) & ~4095))
119
120static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
121 int align, int check_4k)
122{
123 addr_t addr;
124
125 addr = ALIGN(*start, align);
126 if (check_4k && CROSS4K(addr, len)) {
127 addr = ALIGN(addr, 4096);
128 addr = ALIGN(addr, align);
129 }
130 if (addr + len > end)
131 return ERR_PTR(-ENOSPC);
132 *start = addr + len;
133 return (void *) addr;
134}
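
A worked fit_chunk() run with purely illustrative values may help with the 4k-crossing logic:

/*
 * Illustrative values only:
 *   *start = 0x0f81, len = 0x100, align = 64, check_4k = 1
 *   addr = ALIGN(0x0f81, 64) = 0x0fc0
 *   CROSS4K(0x0fc0, 0x100): 0x0000 != 0x1000, so the chunk would cross a
 *   4k boundary; addr is bumped to ALIGN(0x0fc0, 4096) = 0x1000
 *   result: 0x1000 is returned and *start becomes 0x1100
 */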
135
136/**
137 * itcw_init - initialize incremental tcw data structure
138 * @buffer: address of buffer to use for data structures
139 * @size: number of bytes in buffer
140 * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
141 * operation tcw
142 * @intrg: if non-zero, add and initialize an interrogate tcw
143 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
144 * if no tida is to be used.
145 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
146 * by the interrogate tcw, if specified
147 *
148 * Prepare the specified buffer to be used as an incremental tcw, i.e. a
149 * helper data structure that can be used to construct a valid tcw by
150 * successive calls to other helper functions. Note: the buffer needs to be
151 * located below the 2G address limit. The resulting tcw has the following
152 * restrictions:
153 * - no tccb tidal
154 * - input/output tidal is contiguous (no ttic)
155 * - total data should not exceed 4k
156 * - tcw specifies either read or write operation
157 *
158 * On success, return pointer to the resulting incremental tcw data structure,
159 * ERR_PTR otherwise.
160 */
161struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
162 int max_tidaws, int intrg_max_tidaws)
163{
164 struct itcw *itcw;
165 void *chunk;
166 addr_t start;
167 addr_t end;
168
169 /* Check for 2G limit. */
170 start = (addr_t) buffer;
171 end = start + size;
172 if (end > (1 << 31))
173 return ERR_PTR(-EINVAL);
174 memset(buffer, 0, size);
175 /* ITCW. */
176 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
177 if (IS_ERR(chunk))
178 return chunk;
179 itcw = chunk;
180 itcw->max_tidaws = max_tidaws;
181 itcw->intrg_max_tidaws = intrg_max_tidaws;
182 /* Main TCW. */
183 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
184 if (IS_ERR(chunk))
185 return chunk;
186 itcw->tcw = chunk;
187 tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
188 (op == ITCW_OP_WRITE) ? 1 : 0);
189 /* Interrogate TCW. */
190 if (intrg) {
191 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
192 if (IS_ERR(chunk))
193 return chunk;
194 itcw->intrg_tcw = chunk;
195 tcw_init(itcw->intrg_tcw, 1, 0);
196 tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
197 }
198 /* Data TIDAL. */
199 if (max_tidaws > 0) {
200 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
201 max_tidaws, 16, 1);
202 if (IS_ERR(chunk))
203 return chunk;
204 tcw_set_data(itcw->tcw, chunk, 1);
205 }
206 /* Interrogate data TIDAL. */
207 if (intrg && (intrg_max_tidaws > 0)) {
208 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
209 intrg_max_tidaws, 16, 1);
210 if (IS_ERR(chunk))
211 return chunk;
212 tcw_set_data(itcw->intrg_tcw, chunk, 1);
213 }
214 /* TSB. */
215 chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
216 if (IS_ERR(chunk))
217 return chunk;
218 tsb_init(chunk);
219 tcw_set_tsb(itcw->tcw, chunk);
220 /* Interrogate TSB. */
221 if (intrg) {
222 chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
223 if (IS_ERR(chunk))
224 return chunk;
225 tsb_init(chunk);
226 tcw_set_tsb(itcw->intrg_tcw, chunk);
227 }
228 /* TCCB. */
229 chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
230 if (IS_ERR(chunk))
231 return chunk;
232 tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
233 tcw_set_tccb(itcw->tcw, chunk);
234 /* Interrogate TCCB. */
235 if (intrg) {
236 chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
237 if (IS_ERR(chunk))
238 return chunk;
239 tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
240 tcw_set_tccb(itcw->intrg_tcw, chunk);
241 tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
242 sizeof(struct dcw_intrg_data), 0);
243 tcw_finalize(itcw->intrg_tcw, 0);
244 }
245 return itcw;
246}
247EXPORT_SYMBOL(itcw_init);
248
249/**
250 * itcw_add_dcw - add a dcw to the itcw
251 * @itcw: address of the itcw
252 * @cmd: the dcw command
253 * @flags: flags for the dcw
254 * @cd: address of control data for this dcw or NULL if none is required
255 * @cd_count: number of control data bytes for this dcw
256 * @count: number of data bytes for this dcw
257 *
258 * Add a new dcw to the specified itcw by writing the dcw information specified
259 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
260 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
261 * would exceed the available space.
262 *
263 * Note: the tcal field of the tccb header will be updated to reflect added
264 * content.
265 */
266struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
267 u8 cd_count, u32 count)
268{
269 return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
270 flags, cd, cd_count, count);
271}
272EXPORT_SYMBOL(itcw_add_dcw);
273
274/**
275 * itcw_add_tidaw - add a tidaw to the itcw
276 * @itcw: address of the itcw
277 * @flags: flags for the new tidaw
278 * @addr: address value for the new tidaw
279 * @count: count value for the new tidaw
280 *
281 * Add a new tidaw to the input/output data tidaw-list of the specified itcw
282 * (depending on the value of the r-flag and w-flag). Return a pointer to
283 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
284 * available space.
285 *
286 * Note: the tidaw-list is assumed to be contiguous with no ttics. The
287 * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
288 */
289struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
290{
291 if (itcw->num_tidaws >= itcw->max_tidaws)
292 return ERR_PTR(-ENOSPC);
293 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
294}
295EXPORT_SYMBOL(itcw_add_tidaw);
296
297/**
298 * itcw_set_data - set data address and tida flag of the itcw
299 * @itcw: address of the itcw
300 * @addr: the data address
 301 * @use_tidal: zero if the data address specifies a contiguous block of data,
 302 * non-zero if it specifies a list of tidaws.
303 *
304 * Set the input/output data address of the itcw (depending on the value of the
305 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
306 * is set as well.
307 */
308void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
309{
310 tcw_set_data(itcw->tcw, addr, use_tidal);
311}
312EXPORT_SYMBOL(itcw_set_data);
313
314/**
315 * itcw_finalize - calculate length and count fields of the itcw
316 * @itcw: address of the itcw
317 *
 318 * Calculate tcw input-/output-count and tccbl fields and add a tcat to the tccb.
319 * In case input- or output-tida is used, the tidaw-list must be stored in
 320 * contiguous storage (no ttic). The tcal field in the tccb must be
321 * up-to-date.
322 */
323void itcw_finalize(struct itcw *itcw)
324{
325 tcw_finalize(itcw->tcw, itcw->num_tidaws);
326}
327EXPORT_SYMBOL(itcw_finalize);
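
Complementing the example in the struct itcw documentation above, a hedged sketch (not part of the patch) of the error handling a caller would typically add; the command code and count are placeholders and the buffer remains owned by the caller:

static struct itcw *example_prepare_itcw(void)
{
	struct itcw *itcw;
	struct dcw *dcw;
	void *buffer;
	size_t size;

	size = itcw_calc_size(0, 2, 0);
	buffer = kmalloc(size, GFP_DMA);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	itcw = itcw_init(buffer, size, ITCW_OP_READ, 0, 2, 0);
	if (IS_ERR(itcw)) {
		kfree(buffer);
		return itcw;
	}
	dcw = itcw_add_dcw(itcw, 0x02 /* placeholder cmd */, 0, NULL, 0, 72);
	if (IS_ERR(dcw)) {
		kfree(buffer);
		return ERR_PTR(PTR_ERR(dcw));
	}
	/* ... itcw_add_tidaw() for each data area, then: */
	itcw_finalize(itcw);
	return itcw;
}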
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 445cf364e461..2bf36e14b102 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -2082,7 +2082,6 @@ qdio_timeout_handler(struct ccw_device *cdev)
2082 default: 2082 default:
2083 BUG(); 2083 BUG();
2084 } 2084 }
2085 ccw_device_set_timeout(cdev, 0);
2086 wake_up(&cdev->private->wait_q); 2085 wake_up(&cdev->private->wait_q);
2087} 2086}
2088 2087
@@ -2121,6 +2120,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2121 case -EIO: 2120 case -EIO:
2122 QDIO_PRINT_ERR("i/o error on device %s\n", 2121 QDIO_PRINT_ERR("i/o error on device %s\n",
2123 cdev->dev.bus_id); 2122 cdev->dev.bus_id);
2123 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2124 wake_up(&cdev->private->wait_q);
2124 return; 2125 return;
2125 case -ETIMEDOUT: 2126 case -ETIMEDOUT:
2126 qdio_timeout_handler(cdev); 2127 qdio_timeout_handler(cdev);
@@ -2139,8 +2140,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2139 QDIO_DBF_TEXT4(0, trace, dbf_text); 2140 QDIO_DBF_TEXT4(0, trace, dbf_text);
2140#endif /* CONFIG_QDIO_DEBUG */ 2141#endif /* CONFIG_QDIO_DEBUG */
2141 2142
2142 cstat = irb->scsw.cstat; 2143 cstat = irb->scsw.cmd.cstat;
2143 dstat = irb->scsw.dstat; 2144 dstat = irb->scsw.cmd.dstat;
2144 2145
2145 switch (irq_ptr->state) { 2146 switch (irq_ptr->state) {
2146 case QDIO_IRQ_STATE_INACTIVE: 2147 case QDIO_IRQ_STATE_INACTIVE:
@@ -2353,9 +2354,6 @@ tiqdio_check_chsc_availability(void)
2353{ 2354{
2354 char dbf_text[15]; 2355 char dbf_text[15];
2355 2356
2356 if (!css_characteristics_avail)
2357 return -EIO;
2358
2359 /* Check for bit 41. */ 2357 /* Check for bit 41. */
2360 if (!css_general_characteristics.aif) { 2358 if (!css_general_characteristics.aif) {
2361 QDIO_PRINT_WARN("Adapter interruption facility not " \ 2359 QDIO_PRINT_WARN("Adapter interruption facility not " \
@@ -2667,12 +2665,12 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2667 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 2665 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2668 } else if (rc == 0) { 2666 } else if (rc == 0) {
2669 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); 2667 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2670 ccw_device_set_timeout(cdev, timeout);
2671 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); 2668 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2672 2669
2673 wait_event(cdev->private->wait_q, 2670 wait_event_interruptible_timeout(cdev->private->wait_q,
2674 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || 2671 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2675 irq_ptr->state == QDIO_IRQ_STATE_ERR); 2672 irq_ptr->state == QDIO_IRQ_STATE_ERR,
2673 timeout);
2676 } else { 2674 } else {
2677 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " 2675 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2678 "device %s\n", result, cdev->dev.bus_id); 2676 "device %s\n", result, cdev->dev.bus_id);
@@ -2692,7 +2690,6 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2692 2690
2693 /* Ignore errors. */ 2691 /* Ignore errors. */
2694 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 2692 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2695 ccw_device_set_timeout(cdev, 0);
2696out: 2693out:
2697 up(&irq_ptr->setting_up_sema); 2694 up(&irq_ptr->setting_up_sema);
2698 return result; 2695 return result;
@@ -2907,13 +2904,10 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2907 QDIO_DBF_TEXT0(0,setup,dbf_text); 2904 QDIO_DBF_TEXT0(0,setup,dbf_text);
2908 QDIO_DBF_TEXT0(0,trace,dbf_text); 2905 QDIO_DBF_TEXT0(0,trace,dbf_text);
2909 2906
2910 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { 2907 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat))
2911 ccw_device_set_timeout(cdev, 0);
2912 return; 2908 return;
2913 }
2914 2909
2915 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); 2910 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2916 ccw_device_set_timeout(cdev, 0);
2917} 2911}
2918 2912
2919int 2913int
@@ -3196,8 +3190,6 @@ qdio_establish(struct qdio_initialize *init_data)
3196 irq_ptr->schid.ssid, irq_ptr->schid.sch_no, 3190 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3197 result, result2); 3191 result, result2);
3198 result=result2; 3192 result=result2;
3199 if (result)
3200 ccw_device_set_timeout(cdev, 0);
3201 } 3193 }
3202 3194
3203 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); 3195 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
@@ -3279,7 +3271,6 @@ qdio_activate(struct ccw_device *cdev, int flags)
3279 3271
3280 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); 3272 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3281 3273
3282 ccw_device_set_timeout(cdev, 0);
3283 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); 3274 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3284 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, 3275 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3285 0, DOIO_DENY_PREFETCH); 3276 0, DOIO_DENY_PREFETCH);
@@ -3722,7 +3713,8 @@ tiqdio_register_thinints(void)
3722 char dbf_text[20]; 3713 char dbf_text[20];
3723 3714
3724 tiqdio_ind = 3715 tiqdio_ind =
3725 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL); 3716 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL,
3717 TIQDIO_THININT_ISC);
3726 if (IS_ERR(tiqdio_ind)) { 3718 if (IS_ERR(tiqdio_ind)) {
3727 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); 3719 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3728 QDIO_DBF_TEXT0(0,setup,dbf_text); 3720 QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -3738,7 +3730,8 @@ static void
3738tiqdio_unregister_thinints(void) 3730tiqdio_unregister_thinints(void)
3739{ 3731{
3740 if (tiqdio_ind) 3732 if (tiqdio_ind)
3741 s390_unregister_adapter_interrupt(tiqdio_ind); 3733 s390_unregister_adapter_interrupt(tiqdio_ind,
3734 TIQDIO_THININT_ISC);
3742} 3735}
3743 3736
3744static int 3737static int
@@ -3899,6 +3892,7 @@ init_QDIO(void)
3899 qdio_mempool_alloc, 3892 qdio_mempool_alloc,
3900 qdio_mempool_free, NULL); 3893 qdio_mempool_free, NULL);
3901 3894
3895 isc_register(QDIO_AIRQ_ISC);
3902 if (tiqdio_check_chsc_availability()) 3896 if (tiqdio_check_chsc_availability())
3903 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); 3897 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3904 3898
@@ -3911,6 +3905,7 @@ static void __exit
3911cleanup_QDIO(void) 3905cleanup_QDIO(void)
3912{ 3906{
3913 tiqdio_unregister_thinints(); 3907 tiqdio_unregister_thinints();
3908 isc_unregister(QDIO_AIRQ_ISC);
3914 qdio_remove_procfs_entry(); 3909 qdio_remove_procfs_entry();
3915 qdio_release_qdio_memory(); 3910 qdio_release_qdio_memory();
3916 qdio_unregister_dbf_views(); 3911 qdio_unregister_dbf_views();
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index c3df6b2c38b7..7656081a24d2 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -2,8 +2,8 @@
2#define _CIO_QDIO_H 2#define _CIO_QDIO_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5#include <asm/isc.h>
6#include "schid.h" 6#include <asm/schid.h>
7 7
8#ifdef CONFIG_QDIO_DEBUG 8#ifdef CONFIG_QDIO_DEBUG
9#define QDIO_VERBOSE_LEVEL 9 9#define QDIO_VERBOSE_LEVEL 9
@@ -26,7 +26,7 @@
26 */ 26 */
27#define IQDIO_FILL_LEVEL_TO_POLL 4 27#define IQDIO_FILL_LEVEL_TO_POLL 4
28 28
29#define TIQDIO_THININT_ISC 3 29#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
30#define TIQDIO_DELAY_TARGET 0 30#define TIQDIO_DELAY_TARGET 0
31#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ 31#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
32#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ 32#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
deleted file mode 100644
index 54328fec5ade..000000000000
--- a/drivers/s390/cio/schid.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef S390_SCHID_H
2#define S390_SCHID_H
3
4struct subchannel_id {
5 __u32 reserved:13;
6 __u32 ssid:2;
7 __u32 one:1;
8 __u32 sch_no:16;
9} __attribute__ ((packed,aligned(4)));
10
11
12/* Helper function for sane state of pre-allocated subchannel_id. */
13static inline void
14init_subchannel_id(struct subchannel_id *schid)
15{
16 memset(schid, 0, sizeof(struct subchannel_id));
17 schid->one = 1;
18}
19
20static inline int
21schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
22{
23 return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
24}
25
26#endif /* S390_SCHID_H */
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c
new file mode 100644
index 000000000000..f8da25ab576d
--- /dev/null
+++ b/drivers/s390/cio/scsw.c
@@ -0,0 +1,843 @@
1/*
2 * Helper functions for scsw access.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/module.h>
10#include <asm/cio.h>
11#include "css.h"
12#include "chsc.h"
13
14/**
15 * scsw_is_tm - check for transport mode scsw
16 * @scsw: pointer to scsw
17 *
18 * Return non-zero if the specified scsw is a transport mode scsw, zero
19 * otherwise.
20 */
21int scsw_is_tm(union scsw *scsw)
22{
23 return css_general_characteristics.fcx && (scsw->tm.x == 1);
24}
25EXPORT_SYMBOL(scsw_is_tm);
26
27/**
28 * scsw_key - return scsw key field
29 * @scsw: pointer to scsw
30 *
31 * Return the value of the key field of the specified scsw, regardless of
32 * whether it is a transport mode or command mode scsw.
33 */
34u32 scsw_key(union scsw *scsw)
35{
36 if (scsw_is_tm(scsw))
37 return scsw->tm.key;
38 else
39 return scsw->cmd.key;
40}
41EXPORT_SYMBOL(scsw_key);
42
43/**
44 * scsw_eswf - return scsw eswf field
45 * @scsw: pointer to scsw
46 *
47 * Return the value of the eswf field of the specified scsw, regardless of
48 * whether it is a transport mode or command mode scsw.
49 */
50u32 scsw_eswf(union scsw *scsw)
51{
52 if (scsw_is_tm(scsw))
53 return scsw->tm.eswf;
54 else
55 return scsw->cmd.eswf;
56}
57EXPORT_SYMBOL(scsw_eswf);
58
59/**
60 * scsw_cc - return scsw cc field
61 * @scsw: pointer to scsw
62 *
63 * Return the value of the cc field of the specified scsw, regardless of
64 * whether it is a transport mode or command mode scsw.
65 */
66u32 scsw_cc(union scsw *scsw)
67{
68 if (scsw_is_tm(scsw))
69 return scsw->tm.cc;
70 else
71 return scsw->cmd.cc;
72}
73EXPORT_SYMBOL(scsw_cc);
74
75/**
76 * scsw_ectl - return scsw ectl field
77 * @scsw: pointer to scsw
78 *
79 * Return the value of the ectl field of the specified scsw, regardless of
80 * whether it is a transport mode or command mode scsw.
81 */
82u32 scsw_ectl(union scsw *scsw)
83{
84 if (scsw_is_tm(scsw))
85 return scsw->tm.ectl;
86 else
87 return scsw->cmd.ectl;
88}
89EXPORT_SYMBOL(scsw_ectl);
90
91/**
92 * scsw_pno - return scsw pno field
93 * @scsw: pointer to scsw
94 *
95 * Return the value of the pno field of the specified scsw, regardless of
96 * whether it is a transport mode or command mode scsw.
97 */
98u32 scsw_pno(union scsw *scsw)
99{
100 if (scsw_is_tm(scsw))
101 return scsw->tm.pno;
102 else
103 return scsw->cmd.pno;
104}
105EXPORT_SYMBOL(scsw_pno);
106
107/**
108 * scsw_fctl - return scsw fctl field
109 * @scsw: pointer to scsw
110 *
111 * Return the value of the fctl field of the specified scsw, regardless of
112 * whether it is a transport mode or command mode scsw.
113 */
114u32 scsw_fctl(union scsw *scsw)
115{
116 if (scsw_is_tm(scsw))
117 return scsw->tm.fctl;
118 else
119 return scsw->cmd.fctl;
120}
121EXPORT_SYMBOL(scsw_fctl);
122
123/**
124 * scsw_actl - return scsw actl field
125 * @scsw: pointer to scsw
126 *
127 * Return the value of the actl field of the specified scsw, regardless of
128 * whether it is a transport mode or command mode scsw.
129 */
130u32 scsw_actl(union scsw *scsw)
131{
132 if (scsw_is_tm(scsw))
133 return scsw->tm.actl;
134 else
135 return scsw->cmd.actl;
136}
137EXPORT_SYMBOL(scsw_actl);
138
139/**
140 * scsw_stctl - return scsw stctl field
141 * @scsw: pointer to scsw
142 *
143 * Return the value of the stctl field of the specified scsw, regardless of
144 * whether it is a transport mode or command mode scsw.
145 */
146u32 scsw_stctl(union scsw *scsw)
147{
148 if (scsw_is_tm(scsw))
149 return scsw->tm.stctl;
150 else
151 return scsw->cmd.stctl;
152}
153EXPORT_SYMBOL(scsw_stctl);
154
155/**
156 * scsw_dstat - return scsw dstat field
157 * @scsw: pointer to scsw
158 *
159 * Return the value of the dstat field of the specified scsw, regardless of
160 * whether it is a transport mode or command mode scsw.
161 */
162u32 scsw_dstat(union scsw *scsw)
163{
164 if (scsw_is_tm(scsw))
165 return scsw->tm.dstat;
166 else
167 return scsw->cmd.dstat;
168}
169EXPORT_SYMBOL(scsw_dstat);
170
171/**
172 * scsw_cstat - return scsw cstat field
173 * @scsw: pointer to scsw
174 *
175 * Return the value of the cstat field of the specified scsw, regardless of
176 * whether it is a transport mode or command mode scsw.
177 */
178u32 scsw_cstat(union scsw *scsw)
179{
180 if (scsw_is_tm(scsw))
181 return scsw->tm.cstat;
182 else
183 return scsw->cmd.cstat;
184}
185EXPORT_SYMBOL(scsw_cstat);
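
These accessors let common code stay mode-agnostic. A hedged sketch, not part of the patch, of how an interrupt handler could test for a pending unit check without knowing whether the irb carries a command or transport mode scsw (handle_unit_check() is a hypothetical helper):

	if ((scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK))
		handle_unit_check(cdev, irb);	/* hypothetical helper */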
186
187/**
188 * scsw_cmd_is_valid_key - check key field validity
189 * @scsw: pointer to scsw
190 *
191 * Return non-zero if the key field of the specified command mode scsw is
192 * valid, zero otherwise.
193 */
194int scsw_cmd_is_valid_key(union scsw *scsw)
195{
196 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
197}
198EXPORT_SYMBOL(scsw_cmd_is_valid_key);
199
200/**
 201 * scsw_cmd_is_valid_sctl - check sctl field validity
202 * @scsw: pointer to scsw
203 *
 204 * Return non-zero if the sctl field of the specified command mode scsw is
205 * valid, zero otherwise.
206 */
207int scsw_cmd_is_valid_sctl(union scsw *scsw)
208{
209 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
210}
211EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
212
213/**
214 * scsw_cmd_is_valid_eswf - check eswf field validity
215 * @scsw: pointer to scsw
216 *
217 * Return non-zero if the eswf field of the specified command mode scsw is
218 * valid, zero otherwise.
219 */
220int scsw_cmd_is_valid_eswf(union scsw *scsw)
221{
222 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
223}
224EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
225
226/**
227 * scsw_cmd_is_valid_cc - check cc field validity
228 * @scsw: pointer to scsw
229 *
230 * Return non-zero if the cc field of the specified command mode scsw is
231 * valid, zero otherwise.
232 */
233int scsw_cmd_is_valid_cc(union scsw *scsw)
234{
235 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
236 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
237}
238EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
239
240/**
241 * scsw_cmd_is_valid_fmt - check fmt field validity
242 * @scsw: pointer to scsw
243 *
244 * Return non-zero if the fmt field of the specified command mode scsw is
245 * valid, zero otherwise.
246 */
247int scsw_cmd_is_valid_fmt(union scsw *scsw)
248{
249 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
250}
251EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
252
253/**
254 * scsw_cmd_is_valid_pfch - check pfch field validity
255 * @scsw: pointer to scsw
256 *
257 * Return non-zero if the pfch field of the specified command mode scsw is
258 * valid, zero otherwise.
259 */
260int scsw_cmd_is_valid_pfch(union scsw *scsw)
261{
262 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
263}
264EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
265
266/**
267 * scsw_cmd_is_valid_isic - check isic field validity
268 * @scsw: pointer to scsw
269 *
270 * Return non-zero if the isic field of the specified command mode scsw is
271 * valid, zero otherwise.
272 */
273int scsw_cmd_is_valid_isic(union scsw *scsw)
274{
275 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
276}
277EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
278
279/**
280 * scsw_cmd_is_valid_alcc - check alcc field validity
281 * @scsw: pointer to scsw
282 *
283 * Return non-zero if the alcc field of the specified command mode scsw is
284 * valid, zero otherwise.
285 */
286int scsw_cmd_is_valid_alcc(union scsw *scsw)
287{
288 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
289}
290EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
291
292/**
293 * scsw_cmd_is_valid_ssi - check ssi field validity
294 * @scsw: pointer to scsw
295 *
296 * Return non-zero if the ssi field of the specified command mode scsw is
297 * valid, zero otherwise.
298 */
299int scsw_cmd_is_valid_ssi(union scsw *scsw)
300{
301 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
302}
303EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
304
305/**
306 * scsw_cmd_is_valid_zcc - check zcc field validity
307 * @scsw: pointer to scsw
308 *
309 * Return non-zero if the zcc field of the specified command mode scsw is
310 * valid, zero otherwise.
311 */
312int scsw_cmd_is_valid_zcc(union scsw *scsw)
313{
314 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
315 (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
316}
317EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
318
319/**
320 * scsw_cmd_is_valid_ectl - check ectl field validity
321 * @scsw: pointer to scsw
322 *
323 * Return non-zero if the ectl field of the specified command mode scsw is
324 * valid, zero otherwise.
325 */
326int scsw_cmd_is_valid_ectl(union scsw *scsw)
327{
328 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
329 !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
330 (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
331}
332EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
333
334/**
335 * scsw_cmd_is_valid_pno - check pno field validity
336 * @scsw: pointer to scsw
337 *
338 * Return non-zero if the pno field of the specified command mode scsw is
339 * valid, zero otherwise.
340 */
341int scsw_cmd_is_valid_pno(union scsw *scsw)
342{
343 return (scsw->cmd.fctl != 0) &&
344 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
345 (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
346 ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
347 (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
348}
349EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
350
351/**
352 * scsw_cmd_is_valid_fctl - check fctl field validity
353 * @scsw: pointer to scsw
354 *
355 * Return non-zero if the fctl field of the specified command mode scsw is
356 * valid, zero otherwise.
357 */
358int scsw_cmd_is_valid_fctl(union scsw *scsw)
359{
360 /* Only valid if pmcw.dnv == 1*/
361 return 1;
362}
363EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
364
365/**
366 * scsw_cmd_is_valid_actl - check actl field validity
367 * @scsw: pointer to scsw
368 *
369 * Return non-zero if the actl field of the specified command mode scsw is
370 * valid, zero otherwise.
371 */
372int scsw_cmd_is_valid_actl(union scsw *scsw)
373{
374 /* Only valid if pmcw.dnv == 1*/
375 return 1;
376}
377EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
378
379/**
380 * scsw_cmd_is_valid_stctl - check stctl field validity
381 * @scsw: pointer to scsw
382 *
383 * Return non-zero if the stctl field of the specified command mode scsw is
384 * valid, zero otherwise.
385 */
386int scsw_cmd_is_valid_stctl(union scsw *scsw)
387{
388 /* Only valid if pmcw.dnv == 1*/
389 return 1;
390}
391EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
392
393/**
394 * scsw_cmd_is_valid_dstat - check dstat field validity
395 * @scsw: pointer to scsw
396 *
397 * Return non-zero if the dstat field of the specified command mode scsw is
398 * valid, zero otherwise.
399 */
400int scsw_cmd_is_valid_dstat(union scsw *scsw)
401{
402 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
403 (scsw->cmd.cc != 3);
404}
405EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
406
407/**
408 * scsw_cmd_is_valid_cstat - check cstat field validity
409 * @scsw: pointer to scsw
410 *
411 * Return non-zero if the cstat field of the specified command mode scsw is
412 * valid, zero otherwise.
413 */
414int scsw_cmd_is_valid_cstat(union scsw *scsw)
415{
416 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
417 (scsw->cmd.cc != 3);
418}
419EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
420
421/**
422 * scsw_tm_is_valid_key - check key field validity
423 * @scsw: pointer to scsw
424 *
425 * Return non-zero if the key field of the specified transport mode scsw is
426 * valid, zero otherwise.
427 */
428int scsw_tm_is_valid_key(union scsw *scsw)
429{
430 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
431}
432EXPORT_SYMBOL(scsw_tm_is_valid_key);
433
434/**
435 * scsw_tm_is_valid_eswf - check eswf field validity
436 * @scsw: pointer to scsw
437 *
438 * Return non-zero if the eswf field of the specified transport mode scsw is
439 * valid, zero otherwise.
440 */
441int scsw_tm_is_valid_eswf(union scsw *scsw)
442{
443 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
444}
445EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
446
447/**
448 * scsw_tm_is_valid_cc - check cc field validity
449 * @scsw: pointer to scsw
450 *
451 * Return non-zero if the cc field of the specified transport mode scsw is
452 * valid, zero otherwise.
453 */
454int scsw_tm_is_valid_cc(union scsw *scsw)
455{
456 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
457 (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
458}
459EXPORT_SYMBOL(scsw_tm_is_valid_cc);
460
461/**
462 * scsw_tm_is_valid_fmt - check fmt field validity
463 * @scsw: pointer to scsw
464 *
465 * Return non-zero if the fmt field of the specified transport mode scsw is
466 * valid, zero otherwise.
467 */
468int scsw_tm_is_valid_fmt(union scsw *scsw)
469{
470 return 1;
471}
472EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
473
474/**
475 * scsw_tm_is_valid_x - check x field validity
476 * @scsw: pointer to scsw
 *
 * Return non-zero if the x field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_x(union scsw *scsw)
{
	return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_x);

/**
 * scsw_tm_is_valid_q - check q field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the q field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_q(union scsw *scsw)
{
	return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_q);

/**
 * scsw_tm_is_valid_ectl - check ectl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the ectl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_ectl(union scsw *scsw)
{
	return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
	       !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
	       (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
}
EXPORT_SYMBOL(scsw_tm_is_valid_ectl);

/**
 * scsw_tm_is_valid_pno - check pno field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the pno field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_pno(union scsw *scsw)
{
	return (scsw->tm.fctl != 0) &&
	       (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
	       (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
		((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
		 (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
}
EXPORT_SYMBOL(scsw_tm_is_valid_pno);

/**
 * scsw_tm_is_valid_fctl - check fctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fctl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_fctl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
	return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fctl);

/**
 * scsw_tm_is_valid_actl - check actl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the actl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_actl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
	return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_actl);

/**
 * scsw_tm_is_valid_stctl - check stctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the stctl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_stctl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
	return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_stctl);

/**
 * scsw_tm_is_valid_dstat - check dstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the dstat field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_dstat(union scsw *scsw)
{
	return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
	       (scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_dstat);

/**
 * scsw_tm_is_valid_cstat - check cstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cstat field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_cstat(union scsw *scsw)
{
	return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
	       (scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cstat);

/**
 * scsw_tm_is_valid_fcxs - check fcxs field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fcxs field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_fcxs(union scsw *scsw)
{
	return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);

/**
 * scsw_tm_is_valid_schxs - check schxs field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the schxs field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_schxs(union scsw *scsw)
{
	return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
				  SCHN_STAT_INTF_CTRL_CHK |
				  SCHN_STAT_PROT_CHECK |
				  SCHN_STAT_CHN_DATA_CHK));
}
EXPORT_SYMBOL(scsw_tm_is_valid_schxs);

/**
 * scsw_is_valid_actl - check actl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the actl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_actl(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_actl(scsw);
	else
		return scsw_cmd_is_valid_actl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_actl);

/**
 * scsw_is_valid_cc - check cc field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cc field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_cc(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_cc(scsw);
	else
		return scsw_cmd_is_valid_cc(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cc);

/**
 * scsw_is_valid_cstat - check cstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cstat field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_cstat(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_cstat(scsw);
	else
		return scsw_cmd_is_valid_cstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cstat);

/**
 * scsw_is_valid_dstat - check dstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the dstat field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_dstat(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_dstat(scsw);
	else
		return scsw_cmd_is_valid_dstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_dstat);

/**
 * scsw_is_valid_ectl - check ectl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the ectl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_ectl(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_ectl(scsw);
	else
		return scsw_cmd_is_valid_ectl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_ectl);

/**
 * scsw_is_valid_eswf - check eswf field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the eswf field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_eswf(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_eswf(scsw);
	else
		return scsw_cmd_is_valid_eswf(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_eswf);

/**
 * scsw_is_valid_fctl - check fctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fctl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_fctl(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_fctl(scsw);
	else
		return scsw_cmd_is_valid_fctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_fctl);

/**
 * scsw_is_valid_key - check key field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the key field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_key(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_key(scsw);
	else
		return scsw_cmd_is_valid_key(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_key);

/**
 * scsw_is_valid_pno - check pno field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the pno field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_pno(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_pno(scsw);
	else
		return scsw_cmd_is_valid_pno(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_pno);

/**
 * scsw_is_valid_stctl - check stctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the stctl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_stctl(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_valid_stctl(scsw);
	else
		return scsw_cmd_is_valid_stctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_stctl);

/**
 * scsw_cmd_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the command mode scsw indicates that the associated
 * status condition is solicited, zero if it is unsolicited.
 */
int scsw_cmd_is_solicited(union scsw *scsw)
{
	return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_cmd_is_solicited);

/**
 * scsw_tm_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the transport mode scsw indicates that the associated
 * status condition is solicited, zero if it is unsolicited.
 */
int scsw_tm_is_solicited(union scsw *scsw)
{
	return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_tm_is_solicited);

/**
 * scsw_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the transport or command mode scsw indicates that the
 * associated status condition is solicited, zero if it is unsolicited.
 */
int scsw_is_solicited(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw_tm_is_solicited(scsw);
	else
		return scsw_cmd_is_solicited(scsw);
}
EXPORT_SYMBOL(scsw_is_solicited);
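
The helpers above are meant to be called before a driver interprets the corresponding SCSW field, so command mode and transport mode interrupt responses can share one code path. The following sketch shows that intended usage; it is illustrative only and not part of the patch. The handler name sample_irq_handler is invented, and it assumes the usual struct irb layout in which both scsw.cmd and scsw.tm carry a dstat member.

/* Illustrative sketch only - assumes <asm/cio.h> for struct irb and the
 * DEV_STAT_* constants, plus the scsw helpers defined above.
 */
static void sample_irq_handler(struct ccw_device *cdev, unsigned long intparm,
			       struct irb *irb)
{
	union scsw *scsw = &irb->scsw;
	u8 dstat;

	/* Skip status words whose dstat field carries no information. */
	if (!scsw_is_valid_dstat(scsw))
		return;

	/* Pick the member matching the mode reported by scsw_is_tm(). */
	dstat = scsw_is_tm(scsw) ? scsw->tm.dstat : scsw->cmd.dstat;

	if (dstat & DEV_STAT_UNIT_CHECK)
		printk(KERN_DEBUG "unit check, first sense byte %02x\n",
		       irb->ecw[0]);

	if (!scsw_is_solicited(scsw))
		printk(KERN_DEBUG "unsolicited status\n");
}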
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a1ab3e3efd11..62b6b55230d0 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -34,13 +34,15 @@
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <asm/s390_rdev.h> 35#include <asm/s390_rdev.h>
36#include <asm/reset.h> 36#include <asm/reset.h>
37#include <linux/hrtimer.h>
38#include <linux/ktime.h>
37 39
38#include "ap_bus.h" 40#include "ap_bus.h"
39 41
40/* Some prototypes. */ 42/* Some prototypes. */
41static void ap_scan_bus(struct work_struct *); 43static void ap_scan_bus(struct work_struct *);
42static void ap_poll_all(unsigned long); 44static void ap_poll_all(unsigned long);
43static void ap_poll_timeout(unsigned long); 45static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
44static int ap_poll_thread_start(void); 46static int ap_poll_thread_start(void);
45static void ap_poll_thread_stop(void); 47static void ap_poll_thread_stop(void);
46static void ap_request_timeout(unsigned long); 48static void ap_request_timeout(unsigned long);
@@ -80,12 +82,15 @@ static DECLARE_WORK(ap_config_work, ap_scan_bus);
80/* 82/*
81 * Tasklet & timer for AP request polling. 83 * Tasklet & timer for AP request polling.
82 */ 84 */
83static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
84static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); 85static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
85static atomic_t ap_poll_requests = ATOMIC_INIT(0); 86static atomic_t ap_poll_requests = ATOMIC_INIT(0);
86static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); 87static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
87static struct task_struct *ap_poll_kthread = NULL; 88static struct task_struct *ap_poll_kthread = NULL;
88static DEFINE_MUTEX(ap_poll_thread_mutex); 89static DEFINE_MUTEX(ap_poll_thread_mutex);
90static struct hrtimer ap_poll_timer;
 91/* When running in an LPAR, poll with 4 kHz frequency, i.e. every 250000 nanoseconds.
 92 * When running under z/VM, change to 1500000 nanoseconds to match the z/VM polling rate. */
93static unsigned long long poll_timeout = 250000;
89 94
90/** 95/**
 91 * ap_instructions_available() - Test if AP instructions are available. 96 * ap_instructions_available() - Test if AP instructions are available.
@@ -636,11 +641,39 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus,
636 641
637static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); 642static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
638 643
644static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
645{
646 return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
647}
648
649static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
650 size_t count)
651{
652 unsigned long long time;
653 ktime_t hr_time;
654
655 /* 120 seconds = maximum poll interval */
656 if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000)
657 return -EINVAL;
658 poll_timeout = time;
659 hr_time = ktime_set(0, poll_timeout);
660
661 if (!hrtimer_is_queued(&ap_poll_timer) ||
662 !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) {
663 ap_poll_timer.expires = hr_time;
664 hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS);
665 }
666 return count;
667}
668
669static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
670
639static struct bus_attribute *const ap_bus_attrs[] = { 671static struct bus_attribute *const ap_bus_attrs[] = {
640 &bus_attr_ap_domain, 672 &bus_attr_ap_domain,
641 &bus_attr_config_time, 673 &bus_attr_config_time,
642 &bus_attr_poll_thread, 674 &bus_attr_poll_thread,
643 NULL 675 &bus_attr_poll_timeout,
676 NULL,
644}; 677};
645 678
646/** 679/**
@@ -895,9 +928,10 @@ ap_config_timeout(unsigned long ptr)
895 */ 928 */
896static inline void ap_schedule_poll_timer(void) 929static inline void ap_schedule_poll_timer(void)
897{ 930{
898 if (timer_pending(&ap_poll_timer)) 931 if (hrtimer_is_queued(&ap_poll_timer))
899 return; 932 return;
900 mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME); 933 hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout),
934 HRTIMER_MODE_ABS);
901} 935}
902 936
903/** 937/**
@@ -1115,13 +1149,14 @@ EXPORT_SYMBOL(ap_cancel_message);
1115 1149
1116/** 1150/**
1117 * ap_poll_timeout(): AP receive polling for finished AP requests. 1151 * ap_poll_timeout(): AP receive polling for finished AP requests.
1118 * @unused: Unused variable. 1152 * @unused: Unused pointer.
1119 * 1153 *
1120 * Schedules the AP tasklet. 1154 * Schedules the AP tasklet using a high resolution timer.
1121 */ 1155 */
1122static void ap_poll_timeout(unsigned long unused) 1156static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1123{ 1157{
1124 tasklet_schedule(&ap_tasklet); 1158 tasklet_schedule(&ap_tasklet);
1159 return HRTIMER_NORESTART;
1125} 1160}
1126 1161
1127/** 1162/**
@@ -1344,6 +1379,14 @@ int __init ap_module_init(void)
1344 ap_config_timer.expires = jiffies + ap_config_time * HZ; 1379 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1345 add_timer(&ap_config_timer); 1380 add_timer(&ap_config_timer);
1346 1381
 1382 /* Set up the high resolution poll timer.
 1383 * If we are running under z/VM, adjust polling to the z/VM polling rate.
1384 */
1385 if (MACHINE_IS_VM)
1386 poll_timeout = 1500000;
1387 hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1388 ap_poll_timer.function = ap_poll_timeout;
1389
1347 /* Start the low priority AP bus poll thread. */ 1390 /* Start the low priority AP bus poll thread. */
1348 if (ap_thread_flag) { 1391 if (ap_thread_flag) {
1349 rc = ap_poll_thread_start(); 1392 rc = ap_poll_thread_start();
@@ -1355,7 +1398,7 @@ int __init ap_module_init(void)
1355 1398
1356out_work: 1399out_work:
1357 del_timer_sync(&ap_config_timer); 1400 del_timer_sync(&ap_config_timer);
1358 del_timer_sync(&ap_poll_timer); 1401 hrtimer_cancel(&ap_poll_timer);
1359 destroy_workqueue(ap_work_queue); 1402 destroy_workqueue(ap_work_queue);
1360out_root: 1403out_root:
1361 s390_root_dev_unregister(ap_root_device); 1404 s390_root_dev_unregister(ap_root_device);
@@ -1386,7 +1429,7 @@ void ap_module_exit(void)
1386 ap_reset_domain(); 1429 ap_reset_domain();
1387 ap_poll_thread_stop(); 1430 ap_poll_thread_stop();
1388 del_timer_sync(&ap_config_timer); 1431 del_timer_sync(&ap_config_timer);
1389 del_timer_sync(&ap_poll_timer); 1432 hrtimer_cancel(&ap_poll_timer);
1390 destroy_workqueue(ap_work_queue); 1433 destroy_workqueue(ap_work_queue);
1391 tasklet_kill(&ap_tasklet); 1434 tasklet_kill(&ap_tasklet);
1392 s390_root_dev_unregister(ap_root_device); 1435 s390_root_dev_unregister(ap_root_device);
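
The ap_bus.c hunks above replace the jiffies-based ap_poll_timer with a high resolution timer: hrtimer_init() sets the timer up, the callback assigned to .function must return an enum hrtimer_restart value, hrtimer_start() arms it with a ktime_t offset, and hrtimer_cancel() takes the place of del_timer_sync() on the shutdown paths. A stripped-down sketch of that general pattern follows; it is not a quote from the driver, and the names demo_timer, demo_poll, demo_work and demo_tasklet are invented for illustration.

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/interrupt.h>

static struct hrtimer demo_timer;
static void demo_work(unsigned long data);
static DECLARE_TASKLET(demo_tasklet, demo_work, 0);

/* Runs in interrupt context: defer the real work and do not re-arm here. */
static enum hrtimer_restart demo_poll(struct hrtimer *unused)
{
	tasklet_schedule(&demo_tasklet);
	return HRTIMER_NORESTART;
}

static void demo_work(unsigned long data)
{
	/* poll the hardware here */
}

static void demo_timer_setup(unsigned long long timeout_ns)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_poll;
	/* Fire timeout_ns nanoseconds from now. */
	hrtimer_start(&demo_timer, ktime_set(0, timeout_ns), HRTIMER_MODE_REL);
}

static void demo_timer_teardown(void)
{
	hrtimer_cancel(&demo_timer);
	tasklet_kill(&demo_tasklet);
}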
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index c1e1200c43fc..446378b308fc 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -92,6 +92,8 @@ struct ap_queue_status {
92#define AP_DEVICE_TYPE_PCIXCC 5 92#define AP_DEVICE_TYPE_PCIXCC 5
93#define AP_DEVICE_TYPE_CEX2A 6 93#define AP_DEVICE_TYPE_CEX2A 6
94#define AP_DEVICE_TYPE_CEX2C 7 94#define AP_DEVICE_TYPE_CEX2C 7
95#define AP_DEVICE_TYPE_CEX2A2 8
96#define AP_DEVICE_TYPE_CEX2C2 9
95 97
96/* 98/*
97 * AP reset flag states 99 * AP reset flag states
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 4d36e805a234..cb22b97944b8 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -34,6 +34,7 @@
34#include <linux/fs.h> 34#include <linux/fs.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/compat.h> 36#include <linux/compat.h>
37#include <linux/smp_lock.h>
37#include <asm/atomic.h> 38#include <asm/atomic.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39#include <linux/hw_random.h> 40#include <linux/hw_random.h>
@@ -300,7 +301,9 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
300 */ 301 */
301static int zcrypt_open(struct inode *inode, struct file *filp) 302static int zcrypt_open(struct inode *inode, struct file *filp)
302{ 303{
304 lock_kernel();
303 atomic_inc(&zcrypt_open_count); 305 atomic_inc(&zcrypt_open_count);
306 unlock_kernel();
304 return 0; 307 return 0;
305} 308}
306 309
@@ -1068,10 +1071,8 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
1068 1071
1069#define LBUFSIZE 1200UL 1072#define LBUFSIZE 1200UL
1070 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 1073 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
1071 if (!lbuf) { 1074 if (!lbuf)
1072 PRINTK("kmalloc failed!\n");
1073 return 0; 1075 return 0;
1074 }
1075 1076
1076 local_count = min(LBUFSIZE - 1, count); 1077 local_count = min(LBUFSIZE - 1, count);
1077 if (copy_from_user(lbuf, buffer, local_count) != 0) { 1078 if (copy_from_user(lbuf, buffer, local_count) != 0) {
@@ -1081,23 +1082,15 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
1081 lbuf[local_count] = '\0'; 1082 lbuf[local_count] = '\0';
1082 1083
1083 ptr = strstr(lbuf, "Online devices"); 1084 ptr = strstr(lbuf, "Online devices");
1084 if (!ptr) { 1085 if (!ptr)
1085 PRINTK("Unable to parse data (missing \"Online devices\")\n");
1086 goto out; 1086 goto out;
1087 }
1088 ptr = strstr(ptr, "\n"); 1087 ptr = strstr(ptr, "\n");
1089 if (!ptr) { 1088 if (!ptr)
1090 PRINTK("Unable to parse data (missing newline "
1091 "after \"Online devices\")\n");
1092 goto out; 1089 goto out;
1093 }
1094 ptr++; 1090 ptr++;
1095 1091
1096 if (strstr(ptr, "Waiting work element counts") == NULL) { 1092 if (strstr(ptr, "Waiting work element counts") == NULL)
1097 PRINTK("Unable to parse data (missing "
1098 "\"Waiting work element counts\")\n");
1099 goto out; 1093 goto out;
1100 }
1101 1094
1102 for (j = 0; j < 64 && *ptr; ptr++) { 1095 for (j = 0; j < 64 && *ptr; ptr++) {
1103 /* 1096 /*
@@ -1197,16 +1190,12 @@ int __init zcrypt_api_init(void)
1197 1190
1198 /* Register the request sprayer. */ 1191 /* Register the request sprayer. */
1199 rc = misc_register(&zcrypt_misc_device); 1192 rc = misc_register(&zcrypt_misc_device);
1200 if (rc < 0) { 1193 if (rc < 0)
1201 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
1202 zcrypt_misc_device.minor, rc);
1203 goto out; 1194 goto out;
1204 }
1205 1195
1206 /* Set up the proc file system */ 1196 /* Set up the proc file system */
1207 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); 1197 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
1208 if (!zcrypt_entry) { 1198 if (!zcrypt_entry) {
1209 PRINTK("Couldn't create z90crypt proc entry\n");
1210 rc = -ENOMEM; 1199 rc = -ENOMEM;
1211 goto out_misc; 1200 goto out_misc;
1212 } 1201 }
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 5c6e222b2ac4..1d1ec74dadb2 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -30,34 +30,6 @@
30#ifndef _ZCRYPT_API_H_ 30#ifndef _ZCRYPT_API_H_
31#define _ZCRYPT_API_H_ 31#define _ZCRYPT_API_H_
32 32
33/**
34 * Macro definitions
35 *
36 * PDEBUG debugs in the form "zcrypt: function_name -> message"
37 *
38 * PRINTK is like PDEBUG, except that it is always enabled
39 * PRINTKN is like PRINTK, except that it does not include the function name
40 * PRINTKW is like PRINTK, except that it uses KERN_WARNING
41 * PRINTKC is like PRINTK, except that it uses KERN_CRIT
42 */
43#define DEV_NAME "zcrypt"
44
45#define PRINTK(fmt, args...) \
46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
47#define PRINTKN(fmt, args...) \
48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
49#define PRINTKW(fmt, args...) \
50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
51#define PRINTKC(fmt, args...) \
52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)
53
54#ifdef ZCRYPT_DEBUG
55#define PDEBUG(fmt, args...) \
56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
57#else
58#define PDEBUG(fmt, args...) do {} while (0)
59#endif
60
61#include "ap_bus.h" 33#include "ap_bus.h"
62#include <asm/zcrypt.h> 34#include <asm/zcrypt.h>
63 35
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 08657f604b8c..54f4cbc3be9e 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -49,6 +49,7 @@
49 49
50static struct ap_device_id zcrypt_cex2a_ids[] = { 50static struct ap_device_id zcrypt_cex2a_ids[] = {
51 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, 51 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
52 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
52 { /* end of list */ }, 53 { /* end of list */ },
53}; 54};
54 55
@@ -242,9 +243,6 @@ static int convert_response(struct zcrypt_device *zdev,
242 return convert_type80(zdev, reply, 243 return convert_type80(zdev, reply,
243 outputdata, outputdatalength); 244 outputdata, outputdatalength);
244 default: /* Unknown response type, this should NEVER EVER happen */ 245 default: /* Unknown response type, this should NEVER EVER happen */
245 PRINTK("Unrecognized Message Header: %08x%08x\n",
246 *(unsigned int *) reply->message,
247 *(unsigned int *) (reply->message+4));
248 zdev->online = 0; 246 zdev->online = 0;
249 return -EAGAIN; /* repeat the request on a different device. */ 247 return -EAGAIN; /* repeat the request on a different device. */
250 } 248 }
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 3e27fe77d207..03ba27f05f92 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -92,10 +92,6 @@ static inline int convert_error(struct zcrypt_device *zdev,
92{ 92{
93 struct error_hdr *ehdr = reply->message; 93 struct error_hdr *ehdr = reply->message;
94 94
95 PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n",
96 ehdr->type, *(unsigned int *) reply->message,
97 *(unsigned int *) (reply->message + 4));
98
99 switch (ehdr->reply_code) { 95 switch (ehdr->reply_code) {
100 case REP82_ERROR_OPERAND_INVALID: 96 case REP82_ERROR_OPERAND_INVALID:
101 case REP82_ERROR_OPERAND_SIZE: 97 case REP82_ERROR_OPERAND_SIZE:
@@ -123,8 +119,6 @@ static inline int convert_error(struct zcrypt_device *zdev,
123 zdev->online = 0; 119 zdev->online = 0;
124 return -EAGAIN; 120 return -EAGAIN;
125 default: 121 default:
126 PRINTKW("unknown type %02x reply code = %d\n",
127 ehdr->type, ehdr->reply_code);
128 zdev->online = 0; 122 zdev->online = 0;
129 return -EAGAIN; /* repeat the request on a different device. */ 123 return -EAGAIN; /* repeat the request on a different device. */
130 } 124 }
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 6e93b4751782..12da4815ba8e 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -226,9 +226,6 @@ static int convert_response(struct zcrypt_device *zdev,
226 return convert_type84(zdev, reply, 226 return convert_type84(zdev, reply,
227 outputdata, outputdatalength); 227 outputdata, outputdatalength);
228 default: /* Unknown response type, this should NEVER EVER happen */ 228 default: /* Unknown response type, this should NEVER EVER happen */
229 PRINTK("Unrecognized Message Header: %08x%08x\n",
230 *(unsigned int *) reply->message,
231 *(unsigned int *) (reply->message+4));
232 zdev->online = 0; 229 zdev->online = 0;
233 return -EAGAIN; /* repeat the request on a different device. */ 230 return -EAGAIN; /* repeat the request on a different device. */
234 } 231 }
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 17ea56ce1c11..779952cb19fc 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -361,26 +361,18 @@ static int convert_type86(struct zcrypt_device *zdev,
361 service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); 361 service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
362 if (unlikely(service_rc != 0)) { 362 if (unlikely(service_rc != 0)) {
363 service_rs = le16_to_cpu(msg->cprb.ccp_rscode); 363 service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
364 if (service_rc == 8 && service_rs == 66) { 364 if (service_rc == 8 && service_rs == 66)
365 PDEBUG("Bad block format on PCICC\n");
366 return -EINVAL; 365 return -EINVAL;
367 } 366 if (service_rc == 8 && service_rs == 65)
368 if (service_rc == 8 && service_rs == 65) {
369 PDEBUG("Probably an even modulus on PCICC\n");
370 return -EINVAL; 367 return -EINVAL;
371 }
372 if (service_rc == 8 && service_rs == 770) { 368 if (service_rc == 8 && service_rs == 770) {
373 PDEBUG("Invalid key length on PCICC\n");
374 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; 369 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
375 return -EAGAIN; 370 return -EAGAIN;
376 } 371 }
377 if (service_rc == 8 && service_rs == 783) { 372 if (service_rc == 8 && service_rs == 783) {
378 PDEBUG("Extended bitlengths not enabled on PCICC\n");
379 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; 373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
380 return -EAGAIN; 374 return -EAGAIN;
381 } 375 }
382 PRINTK("Unknown service rc/rs (PCICC): %d/%d\n",
383 service_rc, service_rs);
384 zdev->online = 0; 376 zdev->online = 0;
385 return -EAGAIN; /* repeat the request on a different device. */ 377 return -EAGAIN; /* repeat the request on a different device. */
386 } 378 }
@@ -434,9 +426,6 @@ static int convert_response(struct zcrypt_device *zdev,
434 outputdata, outputdatalength); 426 outputdata, outputdatalength);
435 /* no break, incorrect cprb version is an unknown response */ 427 /* no break, incorrect cprb version is an unknown response */
436 default: /* Unknown response type, this should NEVER EVER happen */ 428 default: /* Unknown response type, this should NEVER EVER happen */
437 PRINTK("Unrecognized Message Header: %08x%08x\n",
438 *(unsigned int *) reply->message,
439 *(unsigned int *) (reply->message+4));
440 zdev->online = 0; 429 zdev->online = 0;
441 return -EAGAIN; /* repeat the request on a different device. */ 430 return -EAGAIN; /* repeat the request on a different device. */
442 } 431 }
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 0bc9b3188e64..d8ad36f81540 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -72,6 +72,7 @@ struct response_type {
72static struct ap_device_id zcrypt_pcixcc_ids[] = { 72static struct ap_device_id zcrypt_pcixcc_ids[] = {
73 { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, 73 { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
74 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, 74 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
75 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
75 { /* end of list */ }, 76 { /* end of list */ },
76}; 77};
77 78
@@ -289,38 +290,19 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
289 ap_msg->length = sizeof(struct type6_hdr) + 290 ap_msg->length = sizeof(struct type6_hdr) +
290 CEIL4(xcRB->request_control_blk_length) + 291 CEIL4(xcRB->request_control_blk_length) +
291 xcRB->request_data_length; 292 xcRB->request_data_length;
292 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) { 293 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
293 PRINTK("Combined message is too large (%ld/%d/%d).\n",
294 sizeof(struct type6_hdr),
295 xcRB->request_control_blk_length,
296 xcRB->request_data_length);
297 return -EFAULT; 294 return -EFAULT;
298 } 295 if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
299 if (CEIL4(xcRB->reply_control_blk_length) >
300 PCIXCC_MAX_XCRB_REPLY_SIZE) {
301 PDEBUG("Reply CPRB length is too large (%d).\n",
302 xcRB->request_control_blk_length);
303 return -EFAULT; 296 return -EFAULT;
304 } 297 if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
305 if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) {
306 PDEBUG("Reply data block length is too large (%d).\n",
307 xcRB->reply_data_length);
308 return -EFAULT; 298 return -EFAULT;
309 }
310 replylen = CEIL4(xcRB->reply_control_blk_length) + 299 replylen = CEIL4(xcRB->reply_control_blk_length) +
311 CEIL4(xcRB->reply_data_length) + 300 CEIL4(xcRB->reply_data_length) +
312 sizeof(struct type86_fmt2_msg); 301 sizeof(struct type86_fmt2_msg);
313 if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { 302 if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
314 PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE"
315 " (%d/%d/%d).\n",
316 sizeof(struct type86_fmt2_msg),
317 xcRB->reply_control_blk_length,
318 xcRB->reply_data_length);
319 xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - 303 xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
320 (sizeof(struct type86_fmt2_msg) + 304 (sizeof(struct type86_fmt2_msg) +
321 CEIL4(xcRB->reply_data_length)); 305 CEIL4(xcRB->reply_data_length));
322 PDEBUG("Capping Reply CPRB length at %d\n",
323 xcRB->reply_control_blk_length);
324 } 306 }
325 307
326 /* prepare type6 header */ 308 /* prepare type6 header */
@@ -339,11 +321,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
339 xcRB->request_control_blk_length)) 321 xcRB->request_control_blk_length))
340 return -EFAULT; 322 return -EFAULT;
341 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > 323 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
342 xcRB->request_control_blk_length) { 324 xcRB->request_control_blk_length)
343 PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len,
344 xcRB->request_control_blk_length);
345 return -EFAULT; 325 return -EFAULT;
346 }
347 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; 326 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
348 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); 327 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
349 328
@@ -471,29 +450,18 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
471 service_rc = msg->cprbx.ccp_rtcode; 450 service_rc = msg->cprbx.ccp_rtcode;
472 if (unlikely(service_rc != 0)) { 451 if (unlikely(service_rc != 0)) {
473 service_rs = msg->cprbx.ccp_rscode; 452 service_rs = msg->cprbx.ccp_rscode;
474 if (service_rc == 8 && service_rs == 66) { 453 if (service_rc == 8 && service_rs == 66)
475 PDEBUG("Bad block format on PCIXCC/CEX2C\n");
476 return -EINVAL; 454 return -EINVAL;
477 } 455 if (service_rc == 8 && service_rs == 65)
478 if (service_rc == 8 && service_rs == 65) {
479 PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n");
480 return -EINVAL; 456 return -EINVAL;
481 } 457 if (service_rc == 8 && service_rs == 770)
482 if (service_rc == 8 && service_rs == 770) {
483 PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
484 return -EINVAL; 458 return -EINVAL;
485 }
486 if (service_rc == 8 && service_rs == 783) { 459 if (service_rc == 8 && service_rs == 783) {
487 PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
488 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 460 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
489 return -EAGAIN; 461 return -EAGAIN;
490 } 462 }
491 if (service_rc == 12 && service_rs == 769) { 463 if (service_rc == 12 && service_rs == 769)
492 PDEBUG("Invalid key on PCIXCC/CEX2C\n");
493 return -EINVAL; 464 return -EINVAL;
494 }
495 PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
496 service_rc, service_rs);
497 zdev->online = 0; 465 zdev->online = 0;
498 return -EAGAIN; /* repeat the request on a different device. */ 466 return -EAGAIN; /* repeat the request on a different device. */
499 } 467 }
@@ -569,11 +537,8 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
569 } __attribute__((packed)) *msg = reply->message; 537 } __attribute__((packed)) *msg = reply->message;
570 char *data = reply->message; 538 char *data = reply->message;
571 539
572 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) { 540 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
573 PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
574 rc, rs);
575 return -EINVAL; 541 return -EINVAL;
576 }
577 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); 542 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
578 return msg->fmt2.count2; 543 return msg->fmt2.count2;
579} 544}
@@ -598,9 +563,6 @@ static int convert_response_ica(struct zcrypt_device *zdev,
598 outputdata, outputdatalength); 563 outputdata, outputdatalength);
599 /* no break, incorrect cprb version is an unknown response */ 564 /* no break, incorrect cprb version is an unknown response */
600 default: /* Unknown response type, this should NEVER EVER happen */ 565 default: /* Unknown response type, this should NEVER EVER happen */
601 PRINTK("Unrecognized Message Header: %08x%08x\n",
602 *(unsigned int *) reply->message,
603 *(unsigned int *) (reply->message+4));
604 zdev->online = 0; 566 zdev->online = 0;
605 return -EAGAIN; /* repeat the request on a different device. */ 567 return -EAGAIN; /* repeat the request on a different device. */
606 } 568 }
@@ -627,9 +589,6 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
627 return convert_type86_xcrb(zdev, reply, xcRB); 589 return convert_type86_xcrb(zdev, reply, xcRB);
628 /* no break, incorrect cprb version is an unknown response */ 590 /* no break, incorrect cprb version is an unknown response */
629 default: /* Unknown response type, this should NEVER EVER happen */ 591 default: /* Unknown response type, this should NEVER EVER happen */
630 PRINTK("Unrecognized Message Header: %08x%08x\n",
631 *(unsigned int *) reply->message,
632 *(unsigned int *) (reply->message+4));
633 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 592 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
634 zdev->online = 0; 593 zdev->online = 0;
635 return -EAGAIN; /* repeat the request on a different device. */ 594 return -EAGAIN; /* repeat the request on a different device. */
@@ -653,9 +612,6 @@ static int convert_response_rng(struct zcrypt_device *zdev,
653 return convert_type86_rng(zdev, reply, data); 612 return convert_type86_rng(zdev, reply, data);
654 /* no break, incorrect cprb version is an unknown response */ 613 /* no break, incorrect cprb version is an unknown response */
655 default: /* Unknown response type, this should NEVER EVER happen */ 614 default: /* Unknown response type, this should NEVER EVER happen */
656 PRINTK("Unrecognized Message Header: %08x%08x\n",
657 *(unsigned int *) reply->message,
658 *(unsigned int *) (reply->message+4));
659 zdev->online = 0; 615 zdev->online = 0;
660 return -EAGAIN; /* repeat the request on a different device. */ 616 return -EAGAIN; /* repeat the request on a different device. */
661 } 617 }
@@ -700,10 +656,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
700 memcpy(msg->message, reply->message, length); 656 memcpy(msg->message, reply->message, length);
701 break; 657 break;
702 default: 658 default:
703 PRINTK("Invalid internal response type: %i\n", 659 memcpy(msg->message, &error_reply, sizeof error_reply);
704 resp_type->type);
705 memcpy(msg->message, &error_reply,
706 sizeof error_reply);
707 } 660 }
708 } else 661 } else
709 memcpy(msg->message, reply->message, sizeof error_reply); 662 memcpy(msg->message, reply->message, sizeof error_reply);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 04a1d7bf678c..c644669a75c2 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -703,7 +703,8 @@ claw_irq_handler(struct ccw_device *cdev,
703 if (!cdev->dev.driver_data) { 703 if (!cdev->dev.driver_data) {
704 printk(KERN_WARNING "claw: unsolicited interrupt for device:" 704 printk(KERN_WARNING "claw: unsolicited interrupt for device:"
705 "%s received c-%02x d-%02x\n", 705 "%s received c-%02x d-%02x\n",
706 cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat); 706 cdev->dev.bus_id, irb->scsw.cmd.cstat,
707 irb->scsw.cmd.dstat);
707#ifdef FUNCTRACE 708#ifdef FUNCTRACE
708 printk(KERN_INFO "claw: %s() " 709 printk(KERN_INFO "claw: %s() "
709 "exit on line %d\n",__func__,__LINE__); 710 "exit on line %d\n",__func__,__LINE__);
@@ -732,22 +733,23 @@ claw_irq_handler(struct ccw_device *cdev,
732#ifdef IOTRACE 733#ifdef IOTRACE
733 printk(KERN_INFO "%s: interrupt for device: %04x " 734 printk(KERN_INFO "%s: interrupt for device: %04x "
734 "received c-%02x d-%02x state-%02x\n", 735 "received c-%02x d-%02x state-%02x\n",
735 dev->name, p_ch->devno, irb->scsw.cstat, 736 dev->name, p_ch->devno, irb->scsw.cmd.cstat,
736 irb->scsw.dstat, p_ch->claw_state); 737 irb->scsw.cmd.dstat, p_ch->claw_state);
737#endif 738#endif
738 739
739 /* Copy interruption response block. */ 740 /* Copy interruption response block. */
740 memcpy(p_ch->irb, irb, sizeof(struct irb)); 741 memcpy(p_ch->irb, irb, sizeof(struct irb));
741 742
742 /* Check for good subchannel return code, otherwise error message */ 743 /* Check for good subchannel return code, otherwise error message */
743 if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) { 744 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
744 printk(KERN_INFO "%s: subchannel check for device: %04x -" 745 printk(KERN_INFO "%s: subchannel check for device: %04x -"
745 " Sch Stat %02x Dev Stat %02x CPA - %04x\n", 746 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
746 dev->name, p_ch->devno, 747 dev->name, p_ch->devno,
747 irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa); 748 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
749 irb->scsw.cmd.cpa);
748#ifdef IOTRACE 750#ifdef IOTRACE
749 dumpit((char *)irb,sizeof(struct irb)); 751 dumpit((char *)irb,sizeof(struct irb));
750 dumpit((char *)(unsigned long)irb->scsw.cpa, 752 dumpit((char *)(unsigned long)irb->scsw.cmd.cpa,
751 sizeof(struct ccw1)); 753 sizeof(struct ccw1));
752#endif 754#endif
753#ifdef FUNCTRACE 755#ifdef FUNCTRACE
@@ -759,22 +761,24 @@ claw_irq_handler(struct ccw_device *cdev,
759 } 761 }
760 762
761 /* Check the reason-code of a unit check */ 763 /* Check the reason-code of a unit check */
762 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 764 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
763 ccw_check_unit_check(p_ch, irb->ecw[0]); 765 ccw_check_unit_check(p_ch, irb->ecw[0]);
764 }
765 766
766 /* State machine to bring the connection up, down and to restart */ 767 /* State machine to bring the connection up, down and to restart */
767 p_ch->last_dstat = irb->scsw.dstat; 768 p_ch->last_dstat = irb->scsw.cmd.dstat;
768 769
769 switch (p_ch->claw_state) { 770 switch (p_ch->claw_state) {
770 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ 771 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
771#ifdef DEBUGMSG 772#ifdef DEBUGMSG
772 printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); 773 printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name);
773#endif 774#endif
774 if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 775 if (!((p_ch->irb->scsw.cmd.stctl &
775 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 776 SCSW_STCTL_SEC_STATUS) ||
776 (p_ch->irb->scsw.stctl == 777 (p_ch->irb->scsw.cmd.stctl ==
777 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 778 SCSW_STCTL_STATUS_PEND) ||
779 (p_ch->irb->scsw.cmd.stctl ==
780 (SCSW_STCTL_ALERT_STATUS |
781 SCSW_STCTL_STATUS_PEND)))) {
778#ifdef FUNCTRACE 782#ifdef FUNCTRACE
779 printk(KERN_INFO "%s:%s Exit on line %d\n", 783 printk(KERN_INFO "%s:%s Exit on line %d\n",
780 dev->name,__func__,__LINE__); 784 dev->name,__func__,__LINE__);
@@ -798,10 +802,13 @@ claw_irq_handler(struct ccw_device *cdev,
798 printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", 802 printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n",
799 dev->name); 803 dev->name);
800#endif 804#endif
801 if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 805 if (!((p_ch->irb->scsw.cmd.stctl &
802 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 806 SCSW_STCTL_SEC_STATUS) ||
803 (p_ch->irb->scsw.stctl == 807 (p_ch->irb->scsw.cmd.stctl ==
804 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 808 SCSW_STCTL_STATUS_PEND) ||
809 (p_ch->irb->scsw.cmd.stctl ==
810 (SCSW_STCTL_ALERT_STATUS |
811 SCSW_STCTL_STATUS_PEND)))) {
805#ifdef FUNCTRACE 812#ifdef FUNCTRACE
806 printk(KERN_INFO "%s:%s Exit on line %d\n", 813 printk(KERN_INFO "%s:%s Exit on line %d\n",
807 dev->name,__func__,__LINE__); 814 dev->name,__func__,__LINE__);
@@ -828,8 +835,8 @@ claw_irq_handler(struct ccw_device *cdev,
828 "interrupt for device:" 835 "interrupt for device:"
829 "%s received c-%02x d-%02x\n", 836 "%s received c-%02x d-%02x\n",
830 cdev->dev.bus_id, 837 cdev->dev.bus_id,
831 irb->scsw.cstat, 838 irb->scsw.cmd.cstat,
832 irb->scsw.dstat); 839 irb->scsw.cmd.dstat);
833 return; 840 return;
834 } 841 }
835#ifdef DEBUGMSG 842#ifdef DEBUGMSG
@@ -844,7 +851,7 @@ claw_irq_handler(struct ccw_device *cdev,
844 return; 851 return;
845 case CLAW_START_READ: 852 case CLAW_START_READ:
846 CLAW_DBF_TEXT(4,trace,"ReadIRQ"); 853 CLAW_DBF_TEXT(4,trace,"ReadIRQ");
847 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 854 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
848 clear_bit(0, (void *)&p_ch->IO_active); 855 clear_bit(0, (void *)&p_ch->IO_active);
849 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || 856 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
850 (p_ch->irb->ecw[0] & 0x40) == 0x40 || 857 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
@@ -863,8 +870,8 @@ claw_irq_handler(struct ccw_device *cdev,
863 CLAW_DBF_TEXT(4,trace,"notrdy"); 870 CLAW_DBF_TEXT(4,trace,"notrdy");
864 return; 871 return;
865 } 872 }
866 if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) && 873 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
867 (p_ch->irb->scsw.dstat==0)) { 874 (p_ch->irb->scsw.cmd.dstat == 0)) {
868 if (test_and_set_bit(CLAW_BH_ACTIVE, 875 if (test_and_set_bit(CLAW_BH_ACTIVE,
869 (void *)&p_ch->flag_a) == 0) { 876 (void *)&p_ch->flag_a) == 0) {
870 tasklet_schedule(&p_ch->tasklet); 877 tasklet_schedule(&p_ch->tasklet);
@@ -879,10 +886,13 @@ claw_irq_handler(struct ccw_device *cdev,
879 CLAW_DBF_TEXT(4,trace,"PCI_read"); 886 CLAW_DBF_TEXT(4,trace,"PCI_read");
880 return; 887 return;
881 } 888 }
882 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 889 if (!((p_ch->irb->scsw.cmd.stctl &
883 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 890 SCSW_STCTL_SEC_STATUS) ||
884 (p_ch->irb->scsw.stctl == 891 (p_ch->irb->scsw.cmd.stctl ==
885 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 892 SCSW_STCTL_STATUS_PEND) ||
893 (p_ch->irb->scsw.cmd.stctl ==
894 (SCSW_STCTL_ALERT_STATUS |
895 SCSW_STCTL_STATUS_PEND)))) {
886#ifdef FUNCTRACE 896#ifdef FUNCTRACE
887 printk(KERN_INFO "%s:%s Exit on line %d\n", 897 printk(KERN_INFO "%s:%s Exit on line %d\n",
888 dev->name,__func__,__LINE__); 898 dev->name,__func__,__LINE__);
@@ -911,7 +921,7 @@ claw_irq_handler(struct ccw_device *cdev,
911 CLAW_DBF_TEXT(4,trace,"RdIRQXit"); 921 CLAW_DBF_TEXT(4,trace,"RdIRQXit");
912 return; 922 return;
913 case CLAW_START_WRITE: 923 case CLAW_START_WRITE:
914 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 924 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 915 printk(KERN_INFO "%s: Unit Check Occurred in " 925 printk(KERN_INFO "%s: Unit Check Occurred in "
916 "write channel\n",dev->name); 926 "write channel\n",dev->name);
917 clear_bit(0, (void *)&p_ch->IO_active); 927 clear_bit(0, (void *)&p_ch->IO_active);
@@ -934,16 +944,19 @@ claw_irq_handler(struct ccw_device *cdev,
934 CLAW_DBF_TEXT(4,trace,"rstrtwrt"); 944 CLAW_DBF_TEXT(4,trace,"rstrtwrt");
935 return; 945 return;
936 } 946 }
937 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { 947 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
938 clear_bit(0, (void *)&p_ch->IO_active); 948 clear_bit(0, (void *)&p_ch->IO_active);
939 printk(KERN_INFO "%s: Unit Exception " 949 printk(KERN_INFO "%s: Unit Exception "
940 "Occured in write channel\n", 950 "Occured in write channel\n",
941 dev->name); 951 dev->name);
942 } 952 }
943 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 953 if (!((p_ch->irb->scsw.cmd.stctl &
944 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 954 SCSW_STCTL_SEC_STATUS) ||
945 (p_ch->irb->scsw.stctl == 955 (p_ch->irb->scsw.cmd.stctl ==
946 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 956 SCSW_STCTL_STATUS_PEND) ||
957 (p_ch->irb->scsw.cmd.stctl ==
958 (SCSW_STCTL_ALERT_STATUS |
959 SCSW_STCTL_STATUS_PEND)))) {
947#ifdef FUNCTRACE 960#ifdef FUNCTRACE
948 printk(KERN_INFO "%s:%s Exit on line %d\n", 961 printk(KERN_INFO "%s:%s Exit on line %d\n",
949 dev->name,__func__,__LINE__); 962 dev->name,__func__,__LINE__);
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 2a106f3a076d..7e6bd387f4d8 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -257,9 +257,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
257 if (duration > ch->prof.tx_time) 257 if (duration > ch->prof.tx_time)
258 ch->prof.tx_time = duration; 258 ch->prof.tx_time = duration;
259 259
260 if (ch->irb->scsw.count != 0) 260 if (ch->irb->scsw.cmd.count != 0)
261 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", 261 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
262 dev->name, ch->irb->scsw.count); 262 dev->name, ch->irb->scsw.cmd.count);
263 fsm_deltimer(&ch->timer); 263 fsm_deltimer(&ch->timer);
264 while ((skb = skb_dequeue(&ch->io_queue))) { 264 while ((skb = skb_dequeue(&ch->io_queue))) {
265 priv->stats.tx_packets++; 265 priv->stats.tx_packets++;
@@ -353,7 +353,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
353 struct channel *ch = arg; 353 struct channel *ch = arg;
354 struct net_device *dev = ch->netdev; 354 struct net_device *dev = ch->netdev;
355 struct ctcm_priv *priv = dev->priv; 355 struct ctcm_priv *priv = dev->priv;
356 int len = ch->max_bufsize - ch->irb->scsw.count; 356 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
357 struct sk_buff *skb = ch->trans_skb; 357 struct sk_buff *skb = ch->trans_skb;
358 __u16 block_len = *((__u16 *)skb->data); 358 __u16 block_len = *((__u16 *)skb->data);
359 int check_len; 359 int check_len;
@@ -1234,9 +1234,9 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1234 if (duration > ch->prof.tx_time) 1234 if (duration > ch->prof.tx_time)
1235 ch->prof.tx_time = duration; 1235 ch->prof.tx_time = duration;
1236 1236
1237 if (ch->irb->scsw.count != 0) 1237 if (ch->irb->scsw.cmd.count != 0)
1238 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", 1238 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
1239 dev->name, ch->irb->scsw.count); 1239 dev->name, ch->irb->scsw.cmd.count);
1240 fsm_deltimer(&ch->timer); 1240 fsm_deltimer(&ch->timer);
1241 while ((skb = skb_dequeue(&ch->io_queue))) { 1241 while ((skb = skb_dequeue(&ch->io_queue))) {
1242 priv->stats.tx_packets++; 1242 priv->stats.tx_packets++;
@@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1394 struct sk_buff *skb = ch->trans_skb; 1394 struct sk_buff *skb = ch->trans_skb;
1395 struct sk_buff *new_skb; 1395 struct sk_buff *new_skb;
1396 unsigned long saveflags = 0; /* avoids compiler warning */ 1396 unsigned long saveflags = 0; /* avoids compiler warning */
1397 int len = ch->max_bufsize - ch->irb->scsw.count; 1397 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
1398 1398
1399 if (do_debug_data) { 1399 if (do_debug_data) {
1400 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", 1400 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n",
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index d52843da4f55..6b13c1c1beb8 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1236,8 +1236,8 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1236 /* Check for unsolicited interrupts. */ 1236 /* Check for unsolicited interrupts. */
1237 if (cgdev == NULL) { 1237 if (cgdev == NULL) {
1238 ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", 1238 ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n",
1239 cdev->dev.bus_id, irb->scsw.cstat, 1239 cdev->dev.bus_id, irb->scsw.cmd.cstat,
1240 irb->scsw.dstat); 1240 irb->scsw.cmd.dstat);
1241 return; 1241 return;
1242 } 1242 }
1243 1243
@@ -1266,40 +1266,40 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1266 "received c-%02x d-%02x\n", 1266 "received c-%02x d-%02x\n",
1267 dev->name, 1267 dev->name,
1268 ch->id, 1268 ch->id,
1269 irb->scsw.cstat, 1269 irb->scsw.cmd.cstat,
1270 irb->scsw.dstat); 1270 irb->scsw.cmd.dstat);
1271 1271
1272 /* Copy interruption response block. */ 1272 /* Copy interruption response block. */
1273 memcpy(ch->irb, irb, sizeof(struct irb)); 1273 memcpy(ch->irb, irb, sizeof(struct irb));
1274 1274
1275 /* Check for good subchannel return code, otherwise error message */ 1275 /* Check for good subchannel return code, otherwise error message */
1276 if (irb->scsw.cstat) { 1276 if (irb->scsw.cmd.cstat) {
1277 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); 1277 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
1278 ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", 1278 ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n",
1279 dev->name, ch->id, irb->scsw.cstat, 1279 dev->name, ch->id, irb->scsw.cmd.cstat,
1280 irb->scsw.dstat); 1280 irb->scsw.cmd.dstat);
1281 return; 1281 return;
1282 } 1282 }
1283 1283
1284 /* Check the reason-code of a unit check */ 1284 /* Check the reason-code of a unit check */
1285 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 1285 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
1286 ccw_unit_check(ch, irb->ecw[0]); 1286 ccw_unit_check(ch, irb->ecw[0]);
1287 return; 1287 return;
1288 } 1288 }
1289 if (irb->scsw.dstat & DEV_STAT_BUSY) { 1289 if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
1290 if (irb->scsw.dstat & DEV_STAT_ATTENTION) 1290 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
1291 fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); 1291 fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
1292 else 1292 else
1293 fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); 1293 fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
1294 return; 1294 return;
1295 } 1295 }
1296 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 1296 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
1297 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); 1297 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
1298 return; 1298 return;
1299 } 1299 }
1300 if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 1300 if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
1301 (irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 1301 (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
1302 (irb->scsw.stctl == 1302 (irb->scsw.cmd.stctl ==
1303 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) 1303 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1304 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); 1304 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
1305 else 1305 else
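
The claw and ctcm hunks above are all the same mechanical conversion: the interrupt handlers stop reading the old flat SCSW fields (irb->scsw.cstat, irb->scsw.dstat, irb->scsw.stctl and so on) and instead go through the command mode view of the new scsw union (irb->scsw.cmd.*). A hypothetical before/after fragment, not quoted from either driver (handle_unit_check is an invented helper):

/* Before: scsw was a flat structure with a single set of fields. */
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
	handle_unit_check(ch, irb->ecw[0]);

/* After: the same field is reached through the command mode member of the
 * union; transport mode (FCX) interrupts would use irb->scsw.tm.* or the
 * mode-independent scsw_is_valid_*() helpers added in this merge instead.
 */
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
	handle_unit_check(ch, irb->ecw[0]);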
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 8e7697305a4c..f4a32375c037 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -36,7 +36,6 @@ const char *cu3088_type[] = {
36 "CTC/A", 36 "CTC/A",
37 "ESCON channel", 37 "ESCON channel",
38 "FICON channel", 38 "FICON channel",
39 "P390 LCS card",
40 "OSA LCS card", 39 "OSA LCS card",
41 "CLAW channel device", 40 "CLAW channel device",
42 "unknown channel type", 41 "unknown channel type",
@@ -49,7 +48,6 @@ static struct ccw_device_id cu3088_ids[] = {
49 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, 48 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
50 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, 49 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
51 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, 50 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
52 { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
53 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, 51 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
54 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, 52 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
55 { /* end of list */ } 53 { /* end of list */ }
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
index 1753661f702a..d8558a7105a5 100644
--- a/drivers/s390/net/cu3088.h
+++ b/drivers/s390/net/cu3088.h
@@ -17,9 +17,6 @@ enum channel_types {
17 /* Device is a FICON channel */ 17 /* Device is a FICON channel */
18 channel_type_ficon, 18 channel_type_ficon,
19 19
20 /* Device is a P390 LCS card */
21 channel_type_p390,
22
23 /* Device is a OSA2 card */ 20 /* Device is a OSA2 card */
24 channel_type_osa2, 21 channel_type_osa2,
25 22
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index dd22f4b37037..6de28385b354 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1327,8 +1327,8 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1327 char *sense; 1327 char *sense;
1328 1328
1329 sense = (char *) irb->ecw; 1329 sense = (char *) irb->ecw;
1330 cstat = irb->scsw.cstat; 1330 cstat = irb->scsw.cmd.cstat;
1331 dstat = irb->scsw.dstat; 1331 dstat = irb->scsw.cmd.dstat;
1332 1332
1333 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 1333 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1334 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 1334 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
@@ -1388,11 +1388,13 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1388 else 1388 else
1389 channel = &card->write; 1389 channel = &card->write;
1390 1390
1391 cstat = irb->scsw.cstat; 1391 cstat = irb->scsw.cmd.cstat;
1392 dstat = irb->scsw.dstat; 1392 dstat = irb->scsw.cmd.dstat;
1393 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); 1393 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id);
1394 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat); 1394 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
1395 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl); 1395 irb->scsw.cmd.dstat);
1396 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
1397 irb->scsw.cmd.actl);
1396 1398
1397 /* Check for channel and device errors presented */ 1399 /* Check for channel and device errors presented */
1398 rc = lcs_get_problem(cdev, irb); 1400 rc = lcs_get_problem(cdev, irb);
@@ -1410,11 +1412,11 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1410 } 1412 }
1411 /* How far in the ccw chain have we processed? */ 1413 /* How far in the ccw chain have we processed? */
1412 if ((channel->state != LCS_CH_STATE_INIT) && 1414 if ((channel->state != LCS_CH_STATE_INIT) &&
1413 (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { 1415 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) {
1414 index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) 1416 index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
1415 - channel->ccws; 1417 - channel->ccws;
1416 if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || 1418 if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
1417 (irb->scsw.cstat & SCHN_STAT_PCI)) 1419 (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
1418 /* Bloody io subsystem tells us lies about cpa... */ 1420 /* Bloody io subsystem tells us lies about cpa... */
1419 index = (index - 1) & (LCS_NUM_BUFFS - 1); 1421 index = (index - 1) & (LCS_NUM_BUFFS - 1);
1420 while (channel->io_idx != index) { 1422 while (channel->io_idx != index) {
@@ -1425,25 +1427,24 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1425 } 1427 }
1426 } 1428 }
1427 1429
1428 if ((irb->scsw.dstat & DEV_STAT_DEV_END) || 1430 if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
1429 (irb->scsw.dstat & DEV_STAT_CHN_END) || 1431 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
1430 (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) 1432 (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
1431 /* Mark channel as stopped. */ 1433 /* Mark channel as stopped. */
1432 channel->state = LCS_CH_STATE_STOPPED; 1434 channel->state = LCS_CH_STATE_STOPPED;
1433 else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) 1435 else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
1434 /* CCW execution stopped on a suspend bit. */ 1436 /* CCW execution stopped on a suspend bit. */
1435 channel->state = LCS_CH_STATE_SUSPENDED; 1437 channel->state = LCS_CH_STATE_SUSPENDED;
1436 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { 1438 if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1437 if (irb->scsw.cc != 0) { 1439 if (irb->scsw.cmd.cc != 0) {
1438 ccw_device_halt(channel->ccwdev, (addr_t) channel); 1440 ccw_device_halt(channel->ccwdev, (addr_t) channel);
1439 return; 1441 return;
1440 } 1442 }
1441 /* The channel has been stopped by halt_IO. */ 1443 /* The channel has been stopped by halt_IO. */
1442 channel->state = LCS_CH_STATE_HALTED; 1444 channel->state = LCS_CH_STATE_HALTED;
1443 } 1445 }
1444 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1446 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
1445 channel->state = LCS_CH_STATE_CLEARED; 1447 channel->state = LCS_CH_STATE_CLEARED;
1446 }
1447 /* Do the rest in the tasklet. */ 1448 /* Do the rest in the tasklet. */
1448 tasklet_schedule(&channel->irq_tasklet); 1449 tasklet_schedule(&channel->irq_tasklet);
1449} 1450}
@@ -1761,7 +1762,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
1761 netif_carrier_off(card->dev); 1762 netif_carrier_off(card->dev);
1762 break; 1763 break;
1763 default: 1764 default:
1764 PRINT_INFO("UNRECOGNIZED LGW COMMAND\n"); 1765 LCS_DBF_TEXT(5, trace, "noLGWcmd");
1765 break; 1766 break;
1766 } 1767 }
1767 } else 1768 } else
@@ -2042,13 +2043,12 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
2042 LCS_DBF_TEXT(2, setup, "add_dev"); 2043 LCS_DBF_TEXT(2, setup, "add_dev");
2043 card = lcs_alloc_card(); 2044 card = lcs_alloc_card();
2044 if (!card) { 2045 if (!card) {
2045 PRINT_ERR("Allocation of lcs card failed\n"); 2046 LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM);
2046 put_device(&ccwgdev->dev); 2047 put_device(&ccwgdev->dev);
2047 return -ENOMEM; 2048 return -ENOMEM;
2048 } 2049 }
2049 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); 2050 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2050 if (ret) { 2051 if (ret) {
2051 PRINT_ERR("Creating attributes failed");
2052 lcs_free_card(card); 2052 lcs_free_card(card);
2053 put_device(&ccwgdev->dev); 2053 put_device(&ccwgdev->dev);
2054 return ret; 2054 return ret;
@@ -2140,7 +2140,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2140 default: 2140 default:
2141 LCS_DBF_TEXT(3, setup, "errinit"); 2141 LCS_DBF_TEXT(3, setup, "errinit");
2142 PRINT_ERR("LCS: Initialization failed\n"); 2142 PRINT_ERR("LCS: Initialization failed\n");
2143 PRINT_ERR("LCS: No device found!\n");
2144 goto out; 2143 goto out;
2145 } 2144 }
2146 if (!dev) 2145 if (!dev)
@@ -2269,7 +2268,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
2269 if (!card) 2268 if (!card)
2270 return; 2269 return;
2271 2270
2272 PRINT_INFO("Removing lcs group device ....\n");
2273 LCS_DBF_TEXT(3, setup, "remdev"); 2271 LCS_DBF_TEXT(3, setup, "remdev");
2274 LCS_DBF_HEX(3, setup, &card, sizeof(void*)); 2272 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2275 if (ccwgdev->state == CCWGROUP_ONLINE) { 2273 if (ccwgdev->state == CCWGROUP_ONLINE) {
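The lcs_irq() hunks above follow one mechanical pattern: every access to the subchannel-status word gains a `.cmd.` step, because the SCSW is now read through its command-mode view, while the channel state is still derived from the same device-status, activity-control and function-control bits. The self-contained C sketch below models that bit-to-state mapping; the constant values, the state enum and the trimmed cmd_scsw struct are illustrative stand-ins rather than the kernel definitions, and the halt retry via ccw_device_halt() is left out.

/* Simplified model of the lcs_irq() state derivation; constants are
 * illustrative stand-ins, not the real <asm/cio.h> values. */
#include <stdio.h>

#define DEV_STAT_UNIT_CHECK  0x02
#define DEV_STAT_DEV_END     0x04
#define DEV_STAT_CHN_END     0x08
#define SCSW_ACTL_SUSPENDED  0x01
#define SCSW_FCTL_HALT_FUNC  0x02
#define SCSW_FCTL_CLEAR_FUNC 0x04

enum lcs_ch_state { LCS_CH_STATE_RUNNING, LCS_CH_STATE_STOPPED,
		    LCS_CH_STATE_SUSPENDED, LCS_CH_STATE_HALTED,
		    LCS_CH_STATE_CLEARED };

/* Command-mode view of the status word, trimmed to what the handler reads. */
struct cmd_scsw { unsigned int dstat, actl, fctl; };

static enum lcs_ch_state lcs_next_state(const struct cmd_scsw *s,
					enum lcs_ch_state cur)
{
	if (s->dstat & (DEV_STAT_DEV_END | DEV_STAT_CHN_END |
			DEV_STAT_UNIT_CHECK))
		cur = LCS_CH_STATE_STOPPED;
	else if (s->actl & SCSW_ACTL_SUSPENDED)
		cur = LCS_CH_STATE_SUSPENDED;
	if (s->fctl & SCSW_FCTL_HALT_FUNC)
		cur = LCS_CH_STATE_HALTED;
	if (s->fctl & SCSW_FCTL_CLEAR_FUNC)
		cur = LCS_CH_STATE_CLEARED;
	return cur;
}

int main(void)
{
	struct cmd_scsw s = { .dstat = DEV_STAT_DEV_END, .actl = 0, .fctl = 0 };

	printf("next state: %d\n", lcs_next_state(&s, LCS_CH_STATE_RUNNING));
	return 0;
}

The same hunks also drop or convert the remaining console PRINT_* calls in favor of LCS_DBF entries, so the driver reports these conditions only through its s390 debug-facility buffers.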
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index e4ba6a0372ac..9242b5acc66b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -625,9 +625,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
625 offset += header->next; 625 offset += header->next;
626 header->next -= NETIUCV_HDRLEN; 626 header->next -= NETIUCV_HDRLEN;
627 if (skb_tailroom(pskb) < header->next) { 627 if (skb_tailroom(pskb) < header->next) {
628 PRINT_WARN("%s: Illegal next field in iucv header: "
629 "%d > %d\n",
630 dev->name, header->next, skb_tailroom(pskb));
631 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", 628 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
632 header->next, skb_tailroom(pskb)); 629 header->next, skb_tailroom(pskb));
633 return; 630 return;
@@ -636,8 +633,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
636 skb_reset_mac_header(pskb); 633 skb_reset_mac_header(pskb);
637 skb = dev_alloc_skb(pskb->len); 634 skb = dev_alloc_skb(pskb->len);
638 if (!skb) { 635 if (!skb) {
639 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
640 dev->name);
641 IUCV_DBF_TEXT(data, 2, 636 IUCV_DBF_TEXT(data, 2,
642 "Out of memory in netiucv_unpack_skb\n"); 637 "Out of memory in netiucv_unpack_skb\n");
643 privptr->stats.rx_dropped++; 638 privptr->stats.rx_dropped++;
@@ -674,7 +669,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
674 669
675 if (!conn->netdev) { 670 if (!conn->netdev) {
676 iucv_message_reject(conn->path, msg); 671 iucv_message_reject(conn->path, msg);
677 PRINT_WARN("Received data for unlinked connection\n");
678 IUCV_DBF_TEXT(data, 2, 672 IUCV_DBF_TEXT(data, 2,
679 "Received data for unlinked connection\n"); 673 "Received data for unlinked connection\n");
680 return; 674 return;
@@ -682,8 +676,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
682 if (msg->length > conn->max_buffsize) { 676 if (msg->length > conn->max_buffsize) {
683 iucv_message_reject(conn->path, msg); 677 iucv_message_reject(conn->path, msg);
684 privptr->stats.rx_dropped++; 678 privptr->stats.rx_dropped++;
685 PRINT_WARN("msglen %d > max_buffsize %d\n",
686 msg->length, conn->max_buffsize);
687 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", 679 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
688 msg->length, conn->max_buffsize); 680 msg->length, conn->max_buffsize);
689 return; 681 return;
@@ -695,7 +687,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
695 msg->length, NULL); 687 msg->length, NULL);
696 if (rc || msg->length < 5) { 688 if (rc || msg->length < 5) {
697 privptr->stats.rx_errors++; 689 privptr->stats.rx_errors++;
698 PRINT_WARN("iucv_receive returned %08x\n", rc);
699 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); 690 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
700 return; 691 return;
701 } 692 }
@@ -778,7 +769,6 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
778 fsm_newstate(fi, CONN_STATE_IDLE); 769 fsm_newstate(fi, CONN_STATE_IDLE);
779 if (privptr) 770 if (privptr)
780 privptr->stats.tx_errors += txpackets; 771 privptr->stats.tx_errors += txpackets;
781 PRINT_WARN("iucv_send returned %08x\n", rc);
782 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); 772 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
783 } else { 773 } else {
784 if (privptr) { 774 if (privptr) {
@@ -806,8 +796,6 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
806 path->flags = 0; 796 path->flags = 0;
807 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); 797 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
808 if (rc) { 798 if (rc) {
809 PRINT_WARN("%s: IUCV accept failed with error %d\n",
810 netdev->name, rc);
811 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); 799 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
812 return; 800 return;
813 } 801 }
@@ -873,7 +861,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
873 IUCV_DBF_TEXT(trace, 3, __func__); 861 IUCV_DBF_TEXT(trace, 3, __func__);
874 862
875 fsm_newstate(fi, CONN_STATE_STARTWAIT); 863 fsm_newstate(fi, CONN_STATE_STARTWAIT);
876 PRINT_DEBUG("%s('%s'): connecting ...\n", 864 IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
877 conn->netdev->name, conn->userid); 865 conn->netdev->name, conn->userid);
878 866
879 /* 867 /*
@@ -968,8 +956,8 @@ static void conn_action_inval(fsm_instance *fi, int event, void *arg)
968 struct iucv_connection *conn = arg; 956 struct iucv_connection *conn = arg;
969 struct net_device *netdev = conn->netdev; 957 struct net_device *netdev = conn->netdev;
970 958
971 PRINT_WARN("%s: Cannot connect without username\n", netdev->name); 959 IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
972 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); 960 netdev->name, conn->userid);
973} 961}
974 962
975static const fsm_node conn_fsm[] = { 963static const fsm_node conn_fsm[] = {
@@ -1077,9 +1065,6 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
1077 "connection is up and running\n"); 1065 "connection is up and running\n");
1078 break; 1066 break;
1079 case DEV_STATE_STOPWAIT: 1067 case DEV_STATE_STOPWAIT:
1080 PRINT_INFO(
1081 "%s: got connection UP event during shutdown!\n",
1082 dev->name);
1083 IUCV_DBF_TEXT(data, 2, 1068 IUCV_DBF_TEXT(data, 2,
1084 "dev_action_connup: in DEV_STATE_STOPWAIT\n"); 1069 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1085 break; 1070 break;
@@ -1174,8 +1159,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
1174 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + 1159 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1175 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); 1160 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1176 if (!nskb) { 1161 if (!nskb) {
1177 PRINT_WARN("%s: Could not allocate tx_skb\n",
1178 conn->netdev->name);
1179 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); 1162 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1180 rc = -ENOMEM; 1163 rc = -ENOMEM;
1181 return rc; 1164 return rc;
@@ -1223,7 +1206,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
1223 skb_pull(skb, NETIUCV_HDRLEN); 1206 skb_pull(skb, NETIUCV_HDRLEN);
1224 skb_trim(skb, skb->len - NETIUCV_HDRLEN); 1207 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1225 } 1208 }
1226 PRINT_WARN("iucv_send returned %08x\n", rc);
1227 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); 1209 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1228 } else { 1210 } else {
1229 if (copied) 1211 if (copied)
@@ -1293,14 +1275,11 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1293 * Some sanity checks ... 1275 * Some sanity checks ...
1294 */ 1276 */
1295 if (skb == NULL) { 1277 if (skb == NULL) {
1296 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1297 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); 1278 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1298 privptr->stats.tx_dropped++; 1279 privptr->stats.tx_dropped++;
1299 return 0; 1280 return 0;
1300 } 1281 }
1301 if (skb_headroom(skb) < NETIUCV_HDRLEN) { 1282 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1302 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1303 dev->name, NETIUCV_HDRLEN);
1304 IUCV_DBF_TEXT(data, 2, 1283 IUCV_DBF_TEXT(data, 2,
1305 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); 1284 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1306 dev_kfree_skb(skb); 1285 dev_kfree_skb(skb);
@@ -1393,7 +1372,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1393 1372
1394 IUCV_DBF_TEXT(trace, 3, __func__); 1373 IUCV_DBF_TEXT(trace, 3, __func__);
1395 if (count > 9) { 1374 if (count > 9) {
1396 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1397 IUCV_DBF_TEXT_(setup, 2, 1375 IUCV_DBF_TEXT_(setup, 2,
1398 "%d is length of username\n", (int) count); 1376 "%d is length of username\n", (int) count);
1399 return -EINVAL; 1377 return -EINVAL;
@@ -1409,7 +1387,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1409 /* trailing lf, grr */ 1387 /* trailing lf, grr */
1410 break; 1388 break;
1411 } 1389 }
1412 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1413 IUCV_DBF_TEXT_(setup, 2, 1390 IUCV_DBF_TEXT_(setup, 2,
1414 "username: invalid character %c\n", *p); 1391 "username: invalid character %c\n", *p);
1415 return -EINVAL; 1392 return -EINVAL;
@@ -1421,18 +1398,15 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1421 if (memcmp(username, priv->conn->userid, 9) && 1398 if (memcmp(username, priv->conn->userid, 9) &&
1422 (ndev->flags & (IFF_UP | IFF_RUNNING))) { 1399 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1423 /* username changed while the interface is active. */ 1400 /* username changed while the interface is active. */
1424 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1425 dev->bus_id, priv->conn->userid);
1426 PRINT_WARN("netiucv: user cannot be updated\n");
1427 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); 1401 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1428 return -EBUSY; 1402 return -EPERM;
1429 } 1403 }
1430 read_lock_bh(&iucv_connection_rwlock); 1404 read_lock_bh(&iucv_connection_rwlock);
1431 list_for_each_entry(cp, &iucv_connection_list, list) { 1405 list_for_each_entry(cp, &iucv_connection_list, list) {
1432 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { 1406 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1433 read_unlock_bh(&iucv_connection_rwlock); 1407 read_unlock_bh(&iucv_connection_rwlock);
1434 PRINT_WARN("netiucv: Connection to %s already " 1408 IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
1435 "exists\n", username); 1409 "to %s already exists\n", username);
1436 return -EEXIST; 1410 return -EEXIST;
1437 } 1411 }
1438 } 1412 }
@@ -1466,13 +1440,10 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1466 bs1 = simple_strtoul(buf, &e, 0); 1440 bs1 = simple_strtoul(buf, &e, 0);
1467 1441
1468 if (e && (!isspace(*e))) { 1442 if (e && (!isspace(*e))) {
1469 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1470 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); 1443 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1471 return -EINVAL; 1444 return -EINVAL;
1472 } 1445 }
1473 if (bs1 > NETIUCV_BUFSIZE_MAX) { 1446 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1474 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1475 bs1);
1476 IUCV_DBF_TEXT_(setup, 2, 1447 IUCV_DBF_TEXT_(setup, 2,
1477 "buffer_write: buffer size %d too large\n", 1448 "buffer_write: buffer size %d too large\n",
1478 bs1); 1449 bs1);
@@ -1480,16 +1451,12 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1480 } 1451 }
1481 if ((ndev->flags & IFF_RUNNING) && 1452 if ((ndev->flags & IFF_RUNNING) &&
1482 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { 1453 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1483 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1484 bs1);
1485 IUCV_DBF_TEXT_(setup, 2, 1454 IUCV_DBF_TEXT_(setup, 2,
1486 "buffer_write: buffer size %d too small\n", 1455 "buffer_write: buffer size %d too small\n",
1487 bs1); 1456 bs1);
1488 return -EINVAL; 1457 return -EINVAL;
1489 } 1458 }
1490 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { 1459 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1491 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1492 bs1);
1493 IUCV_DBF_TEXT_(setup, 2, 1460 IUCV_DBF_TEXT_(setup, 2,
1494 "buffer_write: buffer size %d too small\n", 1461 "buffer_write: buffer size %d too small\n",
1495 bs1); 1462 bs1);
@@ -1963,7 +1930,6 @@ static ssize_t conn_write(struct device_driver *drv,
1963 1930
1964 IUCV_DBF_TEXT(trace, 3, __func__); 1931 IUCV_DBF_TEXT(trace, 3, __func__);
1965 if (count>9) { 1932 if (count>9) {
1966 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1967 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); 1933 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1968 return -EINVAL; 1934 return -EINVAL;
1969 } 1935 }
@@ -1976,7 +1942,6 @@ static ssize_t conn_write(struct device_driver *drv,
1976 if (*p == '\n') 1942 if (*p == '\n')
1977 /* trailing lf, grr */ 1943 /* trailing lf, grr */
1978 break; 1944 break;
1979 PRINT_WARN("netiucv: Invalid character in username!\n");
1980 IUCV_DBF_TEXT_(setup, 2, 1945 IUCV_DBF_TEXT_(setup, 2,
1981 "conn_write: invalid character %c\n", *p); 1946 "conn_write: invalid character %c\n", *p);
1982 return -EINVAL; 1947 return -EINVAL;
@@ -1989,8 +1954,8 @@ static ssize_t conn_write(struct device_driver *drv,
1989 list_for_each_entry(cp, &iucv_connection_list, list) { 1954 list_for_each_entry(cp, &iucv_connection_list, list) {
1990 if (!strncmp(username, cp->userid, 9)) { 1955 if (!strncmp(username, cp->userid, 9)) {
1991 read_unlock_bh(&iucv_connection_rwlock); 1956 read_unlock_bh(&iucv_connection_rwlock);
1992 PRINT_WARN("netiucv: Connection to %s already " 1957 IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
1993 "exists\n", username); 1958 "to %s already exists\n", username);
1994 return -EEXIST; 1959 return -EEXIST;
1995 } 1960 }
1996 } 1961 }
@@ -1998,9 +1963,6 @@ static ssize_t conn_write(struct device_driver *drv,
1998 1963
1999 dev = netiucv_init_netdevice(username); 1964 dev = netiucv_init_netdevice(username);
2000 if (!dev) { 1965 if (!dev) {
2001 PRINT_WARN("netiucv: Could not allocate network device "
2002 "structure for user '%s'\n",
2003 netiucv_printname(username));
2004 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); 1966 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2005 return -ENODEV; 1967 return -ENODEV;
2006 } 1968 }
@@ -2020,15 +1982,12 @@ static ssize_t conn_write(struct device_driver *drv,
2020 if (rc) 1982 if (rc)
2021 goto out_unreg; 1983 goto out_unreg;
2022 1984
2023 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2024 1985
2025 return count; 1986 return count;
2026 1987
2027out_unreg: 1988out_unreg:
2028 netiucv_unregister_device(priv->dev); 1989 netiucv_unregister_device(priv->dev);
2029out_free_ndev: 1990out_free_ndev:
2030 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2031 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2032 netiucv_free_netdevice(dev); 1991 netiucv_free_netdevice(dev);
2033 return rc; 1992 return rc;
2034} 1993}
@@ -2073,14 +2032,13 @@ static ssize_t remove_write (struct device_driver *drv,
2073 PRINT_WARN("netiucv: %s cannot be removed\n", 2032 PRINT_WARN("netiucv: %s cannot be removed\n",
2074 ndev->name); 2033 ndev->name);
2075 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); 2034 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2076 return -EBUSY; 2035 return -EPERM;
2077 } 2036 }
2078 unregister_netdev(ndev); 2037 unregister_netdev(ndev);
2079 netiucv_unregister_device(dev); 2038 netiucv_unregister_device(dev);
2080 return count; 2039 return count;
2081 } 2040 }
2082 read_unlock_bh(&iucv_connection_rwlock); 2041 read_unlock_bh(&iucv_connection_rwlock);
2083 PRINT_WARN("netiucv: net device %s unknown\n", name);
2084 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); 2042 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2085 return -EINVAL; 2043 return -EINVAL;
2086} 2044}
@@ -2148,7 +2106,6 @@ static int __init netiucv_init(void)
2148 netiucv_driver.groups = netiucv_drv_attr_groups; 2106 netiucv_driver.groups = netiucv_drv_attr_groups;
2149 rc = driver_register(&netiucv_driver); 2107 rc = driver_register(&netiucv_driver);
2150 if (rc) { 2108 if (rc) {
2151 PRINT_ERR("NETIUCV: failed to register driver.\n");
2152 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); 2109 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2153 goto out_iucv; 2110 goto out_iucv;
2154 } 2111 }
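The netiucv hunks strip the duplicated console warnings from the sysfs store handlers, keeping only the IUCV_DBF messages, and they switch the "interface is active" case from -EBUSY to -EPERM. As a rough illustration of the validation that remains, here is a self-contained sketch of a userid check in the same spirit: at most 8 significant characters plus an optional trailing newline. The exact character set the driver accepts is not visible in the hunk, so the isalnum() rule below is an assumption made for illustration only.

/* Hedged sketch of a netiucv-style userid validation, userspace C.
 * The 8-character limit matches a z/VM userid; the isalnum()
 * restriction is an assumption, not taken from the driver. */
#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int validate_userid(const char *buf, size_t count)
{
	size_t i;

	if (count > 9)			/* 8 chars + optional '\n' */
		return -EINVAL;
	for (i = 0; i < count; i++) {
		if (buf[i] == '\n')	/* trailing lf, grr */
			break;
		if (!isalnum((unsigned char)buf[i]))
			return -EINVAL;
	}
	return 0;
}

int main(void)
{
	const char *ok = "LNXUSER1\n";
	const char *bad = "toolonguser\n";

	printf("%d %d\n", validate_userid(ok, strlen(ok)),
	       validate_userid(bad, strlen(bad)));
	return 0;
}

The -EBUSY to -EPERM change is worth noting for userspace tooling: writing a new peer userid to an interface that is up now reads as "operation not permitted" rather than "device busy".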
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 9a71dae223e8..0ac54dc638c2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -420,7 +420,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
420 QETH_DBF_TEXT(TRACE, 3, "urla"); 420 QETH_DBF_TEXT(TRACE, 3, "urla");
421 break; 421 break;
422 default: 422 default:
423 PRINT_WARN("Received data is IPA " 423 QETH_DBF_MESSAGE(2, "Received data is IPA "
424 "but not a reply!\n"); 424 "but not a reply!\n");
425 break; 425 break;
426 } 426 }
@@ -735,8 +735,8 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
735 char *sense; 735 char *sense;
736 736
737 sense = (char *) irb->ecw; 737 sense = (char *) irb->ecw;
738 cstat = irb->scsw.cstat; 738 cstat = irb->scsw.cmd.cstat;
739 dstat = irb->scsw.dstat; 739 dstat = irb->scsw.cmd.dstat;
740 740
741 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 741 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
742 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 742 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
@@ -823,8 +823,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
823 823
824 if (__qeth_check_irb_error(cdev, intparm, irb)) 824 if (__qeth_check_irb_error(cdev, intparm, irb))
825 return; 825 return;
826 cstat = irb->scsw.cstat; 826 cstat = irb->scsw.cmd.cstat;
827 dstat = irb->scsw.dstat; 827 dstat = irb->scsw.cmd.dstat;
828 828
829 card = CARD_FROM_CDEV(cdev); 829 card = CARD_FROM_CDEV(cdev);
830 if (!card) 830 if (!card)
@@ -842,10 +842,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
842 } 842 }
843 atomic_set(&channel->irq_pending, 0); 843 atomic_set(&channel->irq_pending, 0);
844 844
845 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC)) 845 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
846 channel->state = CH_STATE_STOPPED; 846 channel->state = CH_STATE_STOPPED;
847 847
848 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC)) 848 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
849 channel->state = CH_STATE_HALTED; 849 channel->state = CH_STATE_HALTED;
850 850
851 /*let's wake up immediately on data channel*/ 851 /*let's wake up immediately on data channel*/
@@ -4092,7 +4092,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4092 4092
4093 rc = qeth_determine_card_type(card); 4093 rc = qeth_determine_card_type(card);
4094 if (rc) { 4094 if (rc) {
4095 PRINT_WARN("%s: not a valid card type\n", __func__);
4096 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 4095 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4097 goto err_card; 4096 goto err_card;
4098 } 4097 }
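qeth_get_problem() and qeth_irq() show the same mechanical change as the lcs driver: cstat, dstat and the fctl bits are now reached through scsw.cmd. The sketch below is a deliberately simplified model of why that extra member exists, a status word that can be interpreted either through a command-mode or a transport-mode layout; the member names follow the diff, but the field layout and the tm_mode selector are illustrative assumptions, not the kernel's actual scsw definition.

/* Toy model of a two-view status word; not the real kernel definition. */
#include <stdio.h>

struct cmd_scsw { unsigned int fctl, actl, cstat, dstat, cpa; };
struct tm_scsw  { unsigned int fctl, dstat, tcw; };

union scsw {
	struct cmd_scsw cmd;	/* command-mode view, used by lcs/qeth here */
	struct tm_scsw tm;	/* transport-mode view */
};

static unsigned int scsw_dstat(const union scsw *s, int tm_mode)
{
	return tm_mode ? s->tm.dstat : s->cmd.dstat;
}

int main(void)
{
	union scsw s;

	s.cmd.dstat = 0x0c;	/* channel end + device end, illustratively */
	printf("dstat=%#x\n", scsw_dstat(&s, 0));
	return 0;
}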
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 999552c83bbe..06deaee50f6d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -944,15 +944,8 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
944 else 944 else
945 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, 945 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
946 addr->del_flags); 946 addr->del_flags);
947 if (rc) { 947 if (rc)
948 QETH_DBF_TEXT(TRACE, 2, "failed"); 948 QETH_DBF_TEXT(TRACE, 2, "failed");
949 /* TODO: re-activate this warning as soon as we have a
950 * clean mirco code
951 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
952 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
953 buf, rc);
954 */
955 }
956 949
957 return rc; 950 return rc;
958} 951}
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 8735a415a116..164e090c2625 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -156,11 +156,8 @@ static int __init smsg_init(void)
156 if (rc != 0) 156 if (rc != 0)
157 goto out; 157 goto out;
158 rc = iucv_register(&smsg_handler, 1); 158 rc = iucv_register(&smsg_handler, 1);
159 if (rc) { 159 if (rc)
160 printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
161 rc = -EIO; /* better errno ? */
162 goto out_driver; 160 goto out_driver;
163 }
164 smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); 161 smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
165 if (!smsg_path) { 162 if (!smsg_path) {
166 rc = -ENOMEM; 163 rc = -ENOMEM;
@@ -168,11 +165,8 @@ static int __init smsg_init(void)
168 } 165 }
169 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", 166 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
170 NULL, NULL, NULL); 167 NULL, NULL, NULL);
171 if (rc) { 168 if (rc)
172 printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
173 rc = -EIO; /* better errno ? */
174 goto out_free; 169 goto out_free;
175 }
176 cpcmd("SET SMSG IUCV", NULL, 0, NULL); 170 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
177 return 0; 171 return 0;
178 172
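The smsgiucv hunks drop the printk()s and, more importantly, stop overwriting the callee's return code with a generic -EIO: whatever iucv_register() or iucv_path_connect() reports is now propagated as-is. A self-contained sketch of that goto-unwind style, with stubbed-out callees standing in for the IUCV functions:

/* Sketch of goto-based init unwinding that propagates callee errors.
 * The register/connect/unregister stubs stand in for the IUCV calls. */
#include <errno.h>
#include <stdio.h>

static int fake_register(void)    { return 0; }
static int fake_connect(void)     { return -EBUSY; }	/* simulate a failure */
static void fake_unregister(void) { puts("unregister"); }

static int smsg_init_sketch(void)
{
	int rc;

	rc = fake_register();
	if (rc)
		goto out;		/* return rc as-is, no -EIO rewrite */
	rc = fake_connect();
	if (rc)
		goto out_unregister;
	return 0;

out_unregister:
	fake_unregister();
out:
	return rc;
}

int main(void)
{
	printf("init rc = %d\n", smsg_init_sketch());
	return 0;
}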
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5bfbe7659830..834e9ee7e934 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -2,10 +2,10 @@
2 * drivers/s390/s390mach.c 2 * drivers/s390/s390mach.c
3 * S/390 machine check handler 3 * S/390 machine check handler
4 * 4 *
5 * S390 version 5 * Copyright IBM Corp. 2000,2008
6 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
@@ -18,10 +18,6 @@
18#include <asm/etr.h> 18#include <asm/etr.h>
19#include <asm/lowcore.h> 19#include <asm/lowcore.h>
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include "cio/cio.h"
22#include "cio/chsc.h"
23#include "cio/css.h"
24#include "cio/chp.h"
25#include "s390mach.h" 21#include "s390mach.h"
26 22
27static struct semaphore m_sem; 23static struct semaphore m_sem;
@@ -36,13 +32,40 @@ s390_handle_damage(char *msg)
36 for(;;); 32 for(;;);
37} 33}
38 34
35static crw_handler_t crw_handlers[NR_RSCS];
36
37/**
38 * s390_register_crw_handler() - register a channel report word handler
39 * @rsc: reporting source code to handle
40 * @handler: handler to be registered
41 *
42 * Returns %0 on success and a negative error value otherwise.
43 */
44int s390_register_crw_handler(int rsc, crw_handler_t handler)
45{
46 if ((rsc < 0) || (rsc >= NR_RSCS))
47 return -EINVAL;
48 if (!cmpxchg(&crw_handlers[rsc], NULL, handler))
49 return 0;
50 return -EBUSY;
51}
52
53/**
54 * s390_unregister_crw_handler() - unregister a channel report word handler
55 * @rsc: reporting source code to handle
56 */
57void s390_unregister_crw_handler(int rsc)
58{
59 if ((rsc < 0) || (rsc >= NR_RSCS))
60 return;
61 xchg(&crw_handlers[rsc], NULL);
62 synchronize_sched();
63}
64
39/* 65/*
40 * Retrieve CRWs and call function to handle event. 66 * Retrieve CRWs and call function to handle event.
41 *
42 * Note : we currently process CRWs for io and chsc subchannels only
43 */ 67 */
44static int 68static int s390_collect_crw_info(void *param)
45s390_collect_crw_info(void *param)
46{ 69{
47 struct crw crw[2]; 70 struct crw crw[2];
48 int ccode; 71 int ccode;
@@ -84,57 +107,24 @@ repeat:
84 crw[chain].rsid); 107 crw[chain].rsid);
85 /* Check for overflows. */ 108 /* Check for overflows. */
86 if (crw[chain].oflw) { 109 if (crw[chain].oflw) {
110 int i;
111
87 pr_debug("%s: crw overflow detected!\n", __func__); 112 pr_debug("%s: crw overflow detected!\n", __func__);
88 css_schedule_eval_all(); 113 for (i = 0; i < NR_RSCS; i++) {
114 if (crw_handlers[i])
115 crw_handlers[i](NULL, NULL, 1);
116 }
89 chain = 0; 117 chain = 0;
90 continue; 118 continue;
91 } 119 }
92 switch (crw[chain].rsc) { 120 if (crw[0].chn && !chain) {
93 case CRW_RSC_SCH: 121 chain++;
94 if (crw[0].chn && !chain) 122 continue;
95 break;
96 pr_debug("source is subchannel %04X\n", crw[0].rsid);
97 css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
98 break;
99 case CRW_RSC_MONITOR:
100 pr_debug("source is monitoring facility\n");
101 break;
102 case CRW_RSC_CPATH:
103 pr_debug("source is channel path %02X\n", crw[0].rsid);
104 /*
105 * Check for solicited machine checks. These are
106 * created by reset channel path and need not be
107 * reported to the common I/O layer.
108 */
109 if (crw[chain].slct) {
110 pr_debug("solicited machine check for "
111 "channel path %02X\n", crw[0].rsid);
112 break;
113 }
114 switch (crw[0].erc) {
115 case CRW_ERC_IPARM: /* Path has come. */
116 chp_process_crw(crw[0].rsid, 1);
117 break;
118 case CRW_ERC_PERRI: /* Path has gone. */
119 case CRW_ERC_PERRN:
120 chp_process_crw(crw[0].rsid, 0);
121 break;
122 default:
123 pr_debug("Don't know how to handle erc=%x\n",
124 crw[0].erc);
125 }
126 break;
127 case CRW_RSC_CONFIG:
128 pr_debug("source is configuration-alert facility\n");
129 break;
130 case CRW_RSC_CSS:
131 pr_debug("source is channel subsystem\n");
132 chsc_process_crw();
133 break;
134 default:
135 pr_debug("unknown source\n");
136 break;
137 } 123 }
124 if (crw_handlers[crw[chain].rsc])
125 crw_handlers[crw[chain].rsc](&crw[0],
126 chain ? &crw[1] : NULL,
127 0);
138 /* chain is always 0 or 1 here. */ 128 /* chain is always 0 or 1 here. */
139 chain = crw[chain].chn ? chain + 1 : 0; 129 chain = crw[chain].chn ? chain + 1 : 0;
140 } 130 }
@@ -468,6 +458,10 @@ s390_do_machine_check(struct pt_regs *regs)
468 etr_sync_check(); 458 etr_sync_check();
469 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) 459 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
470 etr_switch_to_local(); 460 etr_switch_to_local();
461 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
462 stp_sync_check();
463 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
464 stp_island_check();
471 } 465 }
472 466
473 if (mci->se) 467 if (mci->se)
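s390mach.c replaces the hard-wired switch over reporting-source codes with a small registration table: subsystems register a crw_handler_t for their RSC, and s390_collect_crw_info() simply dispatches, broadcasting a one-argument overflow call to every registered handler when crw.oflw is set. Below is a self-contained userspace sketch of that table-plus-dispatch idea; struct crw is trimmed to the fields the dispatch needs, and the plain pointer check stands in for the cmpxchg() the kernel uses to make registration race-free.

/* Sketch of an RSC-indexed handler table with overflow broadcast. */
#include <stdio.h>

#define NR_RSCS 16

struct crw { unsigned int rsc, rsid, chn, oflw; };	/* trimmed model */

typedef void (*crw_handler_t)(struct crw *, struct crw *, int overflow);

static crw_handler_t crw_handlers[NR_RSCS];

static int register_crw_handler(unsigned int rsc, crw_handler_t h)
{
	if (rsc >= NR_RSCS)
		return -1;
	if (crw_handlers[rsc])		/* kernel uses cmpxchg() here */
		return -1;
	crw_handlers[rsc] = h;
	return 0;
}

static void dispatch_crw(struct crw *c0, struct crw *c1)
{
	unsigned int i;

	if (c0->oflw) {			/* overflow: tell everyone */
		for (i = 0; i < NR_RSCS; i++)
			if (crw_handlers[i])
				crw_handlers[i](NULL, NULL, 1);
		return;
	}
	if (crw_handlers[c0->rsc])
		crw_handlers[c0->rsc](c0, c1, 0);
}

static void sch_handler(struct crw *c0, struct crw *c1, int overflow)
{
	if (overflow)
		puts("subchannel handler: overflow, evaluate everything");
	else
		printf("subchannel handler: rsid %04x\n", c0->rsid);
	(void)c1;
}

int main(void)
{
	struct crw c = { .rsc = 3, .rsid = 0x0042, .chn = 0, .oflw = 0 };

	register_crw_handler(3, sch_handler);	/* 3 == CRW_RSC_SCH */
	dispatch_crw(&c, NULL);
	return 0;
}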
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index ca681f9b67fc..d39f8b697d27 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -72,6 +72,13 @@ struct crw {
72 __u32 rsid : 16; /* reporting-source ID */ 72 __u32 rsid : 16; /* reporting-source ID */
73} __attribute__ ((packed)); 73} __attribute__ ((packed));
74 74
75typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
76
77extern int s390_register_crw_handler(int rsc, crw_handler_t handler);
78extern void s390_unregister_crw_handler(int rsc);
79
80#define NR_RSCS 16
81
75#define CRW_RSC_MONITOR 0x2 /* monitoring facility */ 82#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
76#define CRW_RSC_SCH 0x3 /* subchannel */ 83#define CRW_RSC_SCH 0x3 /* subchannel */
77#define CRW_RSC_CPATH 0x4 /* channel path */ 84#define CRW_RSC_CPATH 0x4 /* channel path */
@@ -105,6 +112,9 @@ static inline int stcrw(struct crw *pcrw )
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */ 112#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ 113#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107 114
115#define ED_STP_SYNC 7 /* External damage STP sync check */
116#define ED_STP_ISLAND 6 /* External damage STP island check */
117
108struct pt_regs; 118struct pt_regs;
109 119
110void s390_handle_mcck(void); 120void s390_handle_mcck(void);
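The header additions define the STP sync-check and island-check external-damage bit positions that s390_do_machine_check() now tests alongside the ETR ones. A tiny self-contained sketch of that bit-position dispatch follows; the bit values match the hunk, while the damage-code word and the handler bodies are made up for illustration.

/* Sketch of dispatching on external-damage bit positions. */
#include <stdio.h>

#define ED_STP_ISLAND	6
#define ED_STP_SYNC	7
#define ED_ETR_SYNC	12
#define ED_ETR_SWITCH	13

static void stp_sync_check(void)   { puts("STP sync check"); }
static void stp_island_check(void) { puts("STP island check"); }

static void handle_external_damage(unsigned int code)
{
	if (code & (1U << ED_STP_SYNC))
		stp_sync_check();
	if (code & (1U << ED_STP_ISLAND))
		stp_island_check();
	/* ED_ETR_SYNC / ED_ETR_SWITCH are handled the same way */
}

int main(void)
{
	handle_external_damage(1U << ED_STP_SYNC);	/* example code word */
	return 0;
}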
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 03c966059471..bba21e053a1b 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -19,6 +19,7 @@
19#include <linux/timer.h> 19#include <linux/timer.h>
20#include <linux/ioport.h> 20#include <linux/ioport.h>
21#include <linux/major.h> 21#include <linux/major.h>
22#include <linux/smp_lock.h>
22 23
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24#include <asm/io.h> 25#include <asm/io.h>
@@ -429,6 +430,7 @@ static int bpp_open(struct inode *inode, struct file *f)
429 unsigned minor = iminor(inode); 430 unsigned minor = iminor(inode);
430 int ret; 431 int ret;
431 432
433 lock_kernel();
432 spin_lock(&bpp_open_lock); 434 spin_lock(&bpp_open_lock);
433 ret = 0; 435 ret = 0;
434 if (minor >= BPP_NO) { 436 if (minor >= BPP_NO) {
@@ -444,6 +446,7 @@ static int bpp_open(struct inode *inode, struct file *f)
444 } 446 }
445 } 447 }
446 spin_unlock(&bpp_open_lock); 448 spin_unlock(&bpp_open_lock);
449 unlock_kernel();
447 450
448 return ret; 451 return ret;
449} 452}
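bpp_open() is the first of many open() routines in this series that gain an explicit lock_kernel()/unlock_kernel() pair, the BKL pushdown: the big kernel lock is no longer taken implicitly around chardev open, so each driver that still relies on it wraps its own open path, taking care to drop the lock on every return. A self-contained sketch of that wrapping pattern, with stubs standing in for the real calls from <linux/smp_lock.h>:

/* BKL-pushdown shape: wrap the whole open body, unlock on every exit.
 * lock/unlock stubs stand in for lock_kernel()/unlock_kernel(). */
#include <errno.h>
#include <stdio.h>

static void lock_stub(void)   { puts("lock_kernel()"); }
static void unlock_stub(void) { puts("unlock_kernel()"); }

static int device_busy;

static int sketch_open(void)
{
	int ret = 0;

	lock_stub();
	if (device_busy) {
		ret = -EBUSY;		/* error path still unlocks */
		goto out;
	}
	device_busy = 1;
out:
	unlock_stub();
	return ret;
}

int main(void)
{
	printf("first open: %d\n", sketch_open());
	printf("second open: %d\n", sketch_open());
	return 0;
}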
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c
index 235703414370..23abfdfb44f1 100644
--- a/drivers/sbus/char/cpwatchdog.c
+++ b/drivers/sbus/char/cpwatchdog.c
@@ -279,6 +279,7 @@ static inline int wd_opt_timeout(void)
279 279
280static int wd_open(struct inode *inode, struct file *f) 280static int wd_open(struct inode *inode, struct file *f)
281{ 281{
282 lock_kernel();
282 switch(iminor(inode)) 283 switch(iminor(inode))
283 { 284 {
284 case WD0_MINOR: 285 case WD0_MINOR:
@@ -291,6 +292,7 @@ static int wd_open(struct inode *inode, struct file *f)
291 f->private_data = &wd_dev.watchdog[WD2_ID]; 292 f->private_data = &wd_dev.watchdog[WD2_ID];
292 break; 293 break;
293 default: 294 default:
295 unlock_kernel();
294 return(-ENODEV); 296 return(-ENODEV);
295 } 297 }
296 298
@@ -304,11 +306,13 @@ static int wd_open(struct inode *inode, struct file *f)
304 (void *)wd_dev.regs)) { 306 (void *)wd_dev.regs)) {
305 printk("%s: Cannot register IRQ %d\n", 307 printk("%s: Cannot register IRQ %d\n",
306 WD_OBPNAME, wd_dev.irq); 308 WD_OBPNAME, wd_dev.irq);
309 unlock_kernel();
307 return(-EBUSY); 310 return(-EBUSY);
308 } 311 }
309 wd_dev.initialized = 1; 312 wd_dev.initialized = 1;
310 } 313 }
311 314
315 unlock_kernel();
312 return(nonseekable_open(inode, f)); 316 return(nonseekable_open(inode, f));
313} 317}
314 318
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 3279a1b6501d..d8f5c0ca236d 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -94,6 +94,7 @@ static int d7s_open(struct inode *inode, struct file *f)
94{ 94{
95 if (D7S_MINOR != iminor(inode)) 95 if (D7S_MINOR != iminor(inode))
96 return -ENODEV; 96 return -ENODEV;
97 cycle_kernel_lock();
97 atomic_inc(&d7s_users); 98 atomic_inc(&d7s_users);
98 return 0; 99 return 0;
99} 100}
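display7seg's open only bumps a user count, so instead of holding the BKL across the body it calls cycle_kernel_lock(), which in this era of the kernel takes and immediately releases the lock purely to order this open against any other open still running under the BKL. A minimal sketch of that variant, again with stubs in place of the real helpers:

/* cycle_kernel_lock() sketch: take and drop the lock just to serialize
 * against concurrent BKL-holding opens. Stub functions only. */
#include <stdio.h>

static void lock_stub(void)   { puts("lock_kernel()"); }
static void unlock_stub(void) { puts("unlock_kernel()"); }

static void cycle_lock_stub(void)
{
	lock_stub();
	unlock_stub();
}

static int users;

static int d7s_open_sketch(void)
{
	cycle_lock_stub();	/* nothing below needs the lock held */
	users++;
	return 0;
}

int main(void)
{
	d7s_open_sketch();
	printf("users=%d\n", users);
	return 0;
}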
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index dadabef116b6..a408402426f8 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -27,6 +27,7 @@
27#include <linux/miscdevice.h> 27#include <linux/miscdevice.h>
28#include <linux/kmod.h> 28#include <linux/kmod.h>
29#include <linux/reboot.h> 29#include <linux/reboot.h>
30#include <linux/smp_lock.h>
30 31
31#include <asm/ebus.h> 32#include <asm/ebus.h>
32#include <asm/uaccess.h> 33#include <asm/uaccess.h>
@@ -694,6 +695,7 @@ envctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
694static int 695static int
695envctrl_open(struct inode *inode, struct file *file) 696envctrl_open(struct inode *inode, struct file *file)
696{ 697{
698 cycle_kernel_lock();
697 file->private_data = NULL; 699 file->private_data = NULL;
698 return 0; 700 return 0;
699} 701}
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 44e039865aa9..7d95e151513a 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -127,9 +127,13 @@ flash_read(struct file * file, char __user * buf,
127static int 127static int
128flash_open(struct inode *inode, struct file *file) 128flash_open(struct inode *inode, struct file *file)
129{ 129{
130 if (test_and_set_bit(0, (void *)&flash.busy) != 0) 130 lock_kernel();
131 if (test_and_set_bit(0, (void *)&flash.busy) != 0) {
132 unlock_kernel();
131 return -EBUSY; 133 return -EBUSY;
134 }
132 135
136 unlock_kernel();
133 return 0; 137 return 0;
134} 138}
135 139
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 4b7079fdc10c..2bec9ccc0293 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/smp_lock.h>
30#include <linux/types.h> 31#include <linux/types.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/miscdevice.h> 33#include <linux/miscdevice.h>
@@ -417,11 +418,17 @@ static int jsf_mmap(struct file * file, struct vm_area_struct * vma)
417 418
418static int jsf_open(struct inode * inode, struct file * filp) 419static int jsf_open(struct inode * inode, struct file * filp)
419{ 420{
420 421 lock_kernel();
421 if (jsf0.base == 0) return -ENXIO; 422 if (jsf0.base == 0) {
422 if (test_and_set_bit(0, (void *)&jsf0.busy) != 0) 423 unlock_kernel();
424 return -ENXIO;
425 }
426 if (test_and_set_bit(0, (void *)&jsf0.busy) != 0) {
427 unlock_kernel();
423 return -EBUSY; 428 return -EBUSY;
429 }
424 430
431 unlock_kernel();
425 return 0; /* XXX What security? */ 432 return 0; /* XXX What security? */
426} 433}
427 434
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index fbfeb89a6f32..29dc735e1a20 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -33,6 +33,7 @@
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/smp_lock.h>
36#include <linux/string.h> 37#include <linux/string.h>
37#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
38#include <linux/init.h> 39#include <linux/init.h>
@@ -689,9 +690,11 @@ static int openprom_open(struct inode * inode, struct file * file)
689 if (!data) 690 if (!data)
690 return -ENOMEM; 691 return -ENOMEM;
691 692
693 lock_kernel();
692 data->current_node = of_find_node_by_path("/"); 694 data->current_node = of_find_node_by_path("/");
693 data->lastnode = data->current_node; 695 data->lastnode = data->current_node;
694 file->private_data = (void *) data; 696 file->private_data = (void *) data;
697 unlock_kernel();
695 698
696 return 0; 699 return 0;
697} 700}
diff --git a/drivers/sbus/char/riowatchdog.c b/drivers/sbus/char/riowatchdog.c
index a2fc6b8c1334..88c0fc6395e1 100644
--- a/drivers/sbus/char/riowatchdog.c
+++ b/drivers/sbus/char/riowatchdog.c
@@ -11,6 +11,7 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/miscdevice.h> 13#include <linux/miscdevice.h>
14#include <linux/smp_lock.h>
14 15
15#include <asm/io.h> 16#include <asm/io.h>
16#include <asm/ebus.h> 17#include <asm/ebus.h>
@@ -116,6 +117,7 @@ static void riowd_starttimer(void)
116 117
117static int riowd_open(struct inode *inode, struct file *filp) 118static int riowd_open(struct inode *inode, struct file *filp)
118{ 119{
120 cycle_kernel_lock();
119 nonseekable_open(inode, filp); 121 nonseekable_open(inode, filp);
120 return 0; 122 return 0;
121} 123}
diff --git a/drivers/sbus/char/rtc.c b/drivers/sbus/char/rtc.c
index 18d18f1a114e..b0429917154d 100644
--- a/drivers/sbus/char/rtc.c
+++ b/drivers/sbus/char/rtc.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/smp_lock.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/errno.h> 17#include <linux/errno.h>
17#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
@@ -213,6 +214,7 @@ static int rtc_open(struct inode *inode, struct file *file)
213{ 214{
214 int ret; 215 int ret;
215 216
217 lock_kernel();
216 spin_lock_irq(&mostek_lock); 218 spin_lock_irq(&mostek_lock);
217 if (rtc_busy) { 219 if (rtc_busy) {
218 ret = -EBUSY; 220 ret = -EBUSY;
@@ -221,6 +223,7 @@ static int rtc_open(struct inode *inode, struct file *file)
221 ret = 0; 223 ret = 0;
222 } 224 }
223 spin_unlock_irq(&mostek_lock); 225 spin_unlock_irq(&mostek_lock);
226 unlock_kernel();
224 227
225 return ret; 228 return ret;
226} 229}
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 383f32c1d347..513ba61ae966 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -9,6 +9,7 @@
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/smp_lock.h>
12#include <linux/ioport.h> 13#include <linux/ioport.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/miscdevice.h> 15#include <linux/miscdevice.h>
@@ -211,8 +212,10 @@ uctrl_ioctl(struct inode *inode, struct file *file,
211static int 212static int
212uctrl_open(struct inode *inode, struct file *file) 213uctrl_open(struct inode *inode, struct file *file)
213{ 214{
215 lock_kernel();
214 uctrl_get_event_status(); 216 uctrl_get_event_status();
215 uctrl_get_external_status(); 217 uctrl_get_external_status();
218 unlock_kernel();
216 return 0; 219 return 0;
217} 220}
218 221
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c
index d4f8fcded51d..1f6cb8ae2784 100644
--- a/drivers/sbus/char/vfc_dev.c
+++ b/drivers/sbus/char/vfc_dev.c
@@ -24,6 +24,7 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/smp_lock.h>
27 28
28#include <asm/openprom.h> 29#include <asm/openprom.h>
29#include <asm/oplib.h> 30#include <asm/oplib.h>
@@ -178,14 +179,17 @@ static int vfc_open(struct inode *inode, struct file *file)
178{ 179{
179 struct vfc_dev *dev; 180 struct vfc_dev *dev;
180 181
182 lock_kernel();
181 spin_lock(&vfc_dev_lock); 183 spin_lock(&vfc_dev_lock);
182 dev = vfc_get_dev_ptr(iminor(inode)); 184 dev = vfc_get_dev_ptr(iminor(inode));
183 if (dev == NULL) { 185 if (dev == NULL) {
184 spin_unlock(&vfc_dev_lock); 186 spin_unlock(&vfc_dev_lock);
187 unlock_kernel();
185 return -ENODEV; 188 return -ENODEV;
186 } 189 }
187 if (dev->busy) { 190 if (dev->busy) {
188 spin_unlock(&vfc_dev_lock); 191 spin_unlock(&vfc_dev_lock);
192 unlock_kernel();
189 return -EBUSY; 193 return -EBUSY;
190 } 194 }
191 195
@@ -202,6 +206,7 @@ static int vfc_open(struct inode *inode, struct file *file)
202 vfc_captstat_reset(dev); 206 vfc_captstat_reset(dev);
203 207
204 vfc_unlock_device(dev); 208 vfc_unlock_device(dev);
209 unlock_kernel();
205 return 0; 210 return 0;
206} 211}
207 212
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 867f6fd5c2c0..7045511f9ad2 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -84,6 +84,7 @@
84#include <linux/pci.h> 84#include <linux/pci.h>
85#include <linux/time.h> 85#include <linux/time.h>
86#include <linux/mutex.h> 86#include <linux/mutex.h>
87#include <linux/smp_lock.h>
87#include <asm/io.h> 88#include <asm/io.h>
88#include <asm/irq.h> 89#include <asm/irq.h>
89#include <asm/uaccess.h> 90#include <asm/uaccess.h>
@@ -862,11 +863,13 @@ out:
862} /* End twa_chrdev_ioctl() */ 863} /* End twa_chrdev_ioctl() */
863 864
864/* This function handles open for the character device */ 865/* This function handles open for the character device */
866/* NOTE that this function will race with remove. */
865static int twa_chrdev_open(struct inode *inode, struct file *file) 867static int twa_chrdev_open(struct inode *inode, struct file *file)
866{ 868{
867 unsigned int minor_number; 869 unsigned int minor_number;
868 int retval = TW_IOCTL_ERROR_OS_ENODEV; 870 int retval = TW_IOCTL_ERROR_OS_ENODEV;
869 871
872 cycle_kernel_lock();
870 minor_number = iminor(inode); 873 minor_number = iminor(inode);
871 if (minor_number >= twa_device_extension_count) 874 if (minor_number >= twa_device_extension_count)
872 goto out; 875 goto out;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 8c22329aa85e..a0537f09aa21 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -198,6 +198,7 @@
198 198
199#include <linux/module.h> 199#include <linux/module.h>
200#include <linux/reboot.h> 200#include <linux/reboot.h>
201#include <linux/smp_lock.h>
201#include <linux/spinlock.h> 202#include <linux/spinlock.h>
202#include <linux/interrupt.h> 203#include <linux/interrupt.h>
203#include <linux/moduleparam.h> 204#include <linux/moduleparam.h>
@@ -1027,10 +1028,12 @@ out:
1027} /* End tw_chrdev_ioctl() */ 1028} /* End tw_chrdev_ioctl() */
1028 1029
1029/* This function handles open for the character device */ 1030/* This function handles open for the character device */
1031/* NOTE that this function races with remove. */
1030static int tw_chrdev_open(struct inode *inode, struct file *file) 1032static int tw_chrdev_open(struct inode *inode, struct file *file)
1031{ 1033{
1032 unsigned int minor_number; 1034 unsigned int minor_number;
1033 1035
1036 cycle_kernel_lock();
1034 dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n"); 1037 dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
1035 1038
1036 minor_number = iminor(inode); 1039 minor_number = iminor(inode);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 1f7c83607f84..68c140e82673 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -38,6 +38,7 @@
38#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/smp_lock.h>
41#include <linux/spinlock.h> 42#include <linux/spinlock.h>
42#include <linux/syscalls.h> 43#include <linux/syscalls.h>
43#include <linux/delay.h> 44#include <linux/delay.h>
@@ -667,6 +668,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
667 unsigned minor_number = iminor(inode); 668 unsigned minor_number = iminor(inode);
668 int err = -ENODEV; 669 int err = -ENODEV;
669 670
671 lock_kernel(); /* BKL pushdown: nothing else protects this list */
670 list_for_each_entry(aac, &aac_devices, entry) { 672 list_for_each_entry(aac, &aac_devices, entry) {
671 if (aac->id == minor_number) { 673 if (aac->id == minor_number) {
672 file->private_data = aac; 674 file->private_data = aac;
@@ -674,6 +676,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
674 break; 676 break;
675 } 677 }
676 } 678 }
679 unlock_kernel();
677 680
678 return err; 681 return err;
679} 682}
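In aac_cfg_open() the pushed-down BKL does real work, as the added comment says: it is the only thing protecting the walk of aac_devices while looking up the adapter that matches the minor number. A self-contained sketch of that lock-protected lookup, using a plain linked list and stub lock functions in place of the kernel list and the BKL:

/* Lock-protected lookup of a device by minor number (sketch). */
#include <stdio.h>

static void lock_stub(void)   { }	/* stands in for lock_kernel() */
static void unlock_stub(void) { }	/* stands in for unlock_kernel() */

struct adapter {
	int id;
	struct adapter *next;
};

static struct adapter a1 = { .id = 1, .next = NULL };
static struct adapter a0 = { .id = 0, .next = &a1 };
static struct adapter *adapters = &a0;

static struct adapter *cfg_open_sketch(int minor)
{
	struct adapter *aac, *found = NULL;

	lock_stub();			/* nothing else protects this list */
	for (aac = adapters; aac; aac = aac->next) {
		if (aac->id == minor) {
			found = aac;
			break;
		}
	}
	unlock_stub();
	return found;
}

int main(void)
{
	printf("minor 1 -> %p\n", (void *)cfg_open_sketch(1));
	printf("minor 7 -> %p\n", (void *)cfg_open_sketch(7));
	return 0;
}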
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index 7236143941f3..a8587f1f5e7e 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4config SCSI_ACORNSCSI_3 4config SCSI_ACORNSCSI_3
5 tristate "Acorn SCSI card (aka30) support" 5 tristate "Acorn SCSI card (aka30) support"
6 depends on ARCH_ACORN && SCSI && BROKEN 6 depends on ARCH_ACORN && SCSI
7 select SCSI_SPI_ATTRS 7 select SCSI_SPI_ATTRS
8 help 8 help
9 This enables support for the Acorn SCSI card (aka30). If you have an 9 This enables support for the Acorn SCSI card (aka30). If you have an
diff --git a/drivers/scsi/arm/acornscsi-io.S b/drivers/scsi/arm/acornscsi-io.S
index 93467e6ac923..5cebe3105260 100644
--- a/drivers/scsi/arm/acornscsi-io.S
+++ b/drivers/scsi/arm/acornscsi-io.S
@@ -10,17 +10,10 @@
10#include <asm/assembler.h> 10#include <asm/assembler.h>
11#include <asm/hardware.h> 11#include <asm/hardware.h>
12 12
13#if (IO_BASE == (PCIO_BASE & 0xff000000)) 13#if defined(__APCS_32__)
14#define ADDR(off,reg) \ 14#define LOADREGS(t,r,l...) ldm##t r, l
15 tst off, $0x80000000 ;\ 15#elif defined(__APCS_26__)
16 mov reg, $IO_BASE ;\ 16#define LOADREGS(t,r,l...) ldm##t r, l##^
17 orreq reg, reg, $(PCIO_BASE & 0x00ff0000)
18#else
19#define ADDR(off,reg) \
20 tst off, $0x80000000 ;\
21 movne reg, $IO_BASE ;\
22 moveq reg, $(PCIO_BASE & 0xff000000) ;\
23 orreq reg, reg, $(PCIO_BASE & 0x00ff0000)
24#endif 17#endif
25 18
26@ Purpose: transfer a block of data from the acorn scsi card to memory 19@ Purpose: transfer a block of data from the acorn scsi card to memory
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 8e53f02cc311..918ccf818757 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -123,12 +123,6 @@
123#define DBG(cmd,xxx...) xxx 123#define DBG(cmd,xxx...) xxx
124#endif 124#endif
125 125
126#ifndef STRINGIFY
127#define STRINGIFY(x) #x
128#endif
129#define STRx(x) STRINGIFY(x)
130#define NO_WRITE_STR STRx(NO_WRITE)
131
132#include <linux/module.h> 126#include <linux/module.h>
133#include <linux/kernel.h> 127#include <linux/kernel.h>
134#include <linux/string.h> 128#include <linux/string.h>
@@ -141,9 +135,10 @@
141#include <linux/interrupt.h> 135#include <linux/interrupt.h>
142#include <linux/init.h> 136#include <linux/init.h>
143#include <linux/bitops.h> 137#include <linux/bitops.h>
138#include <linux/stringify.h>
139#include <linux/io.h>
144 140
145#include <asm/system.h> 141#include <asm/system.h>
146#include <asm/io.h>
147#include <asm/ecard.h> 142#include <asm/ecard.h>
148 143
149#include "../scsi.h" 144#include "../scsi.h"
@@ -203,44 +198,46 @@ static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
203 * Miscellaneous 198 * Miscellaneous
204 */ 199 */
205 200
206static inline void 201/* Offsets from MEMC base */
207sbic_arm_write(unsigned int io_port, int reg, int value) 202#define SBIC_REGIDX 0x2000
203#define SBIC_REGVAL 0x2004
204#define DMAC_OFFSET 0x3000
205
206/* Offsets from FAST IOC base */
207#define INT_REG 0x2000
208#define PAGE_REG 0x3000
209
210static inline void sbic_arm_write(AS_Host *host, unsigned int reg, unsigned int value)
208{ 211{
209 __raw_writeb(reg, io_port); 212 writeb(reg, host->base + SBIC_REGIDX);
210 __raw_writeb(value, io_port + 4); 213 writeb(value, host->base + SBIC_REGVAL);
211} 214}
212 215
213#define sbic_arm_writenext(io,val) \ 216static inline int sbic_arm_read(AS_Host *host, unsigned int reg)
214 __raw_writeb((val), (io) + 4)
215
216static inline
217int sbic_arm_read(unsigned int io_port, int reg)
218{ 217{
219 if(reg == SBIC_ASR) 218 if(reg == SBIC_ASR)
220 return __raw_readl(io_port) & 255; 219 return readl(host->base + SBIC_REGIDX) & 255;
221 __raw_writeb(reg, io_port); 220 writeb(reg, host->base + SBIC_REGIDX);
222 return __raw_readl(io_port + 4) & 255; 221 return readl(host->base + SBIC_REGVAL) & 255;
223} 222}
224 223
225#define sbic_arm_readnext(io) \ 224#define sbic_arm_writenext(host, val) writeb((val), (host)->base + SBIC_REGVAL)
226 __raw_readb((io) + 4) 225#define sbic_arm_readnext(host) readb((host)->base + SBIC_REGVAL)
227 226
228#ifdef USE_DMAC 227#ifdef USE_DMAC
229#define dmac_read(io_port,reg) \ 228#define dmac_read(host,reg) \
230 inb((io_port) + (reg)) 229 readb((host)->base + DMAC_OFFSET + ((reg) << 2))
231 230
232#define dmac_write(io_port,reg,value) \ 231#define dmac_write(host,reg,value) \
233 ({ outb((value), (io_port) + (reg)); }) 232 ({ writeb((value), (host)->base + DMAC_OFFSET + ((reg) << 2)); })
234 233
235#define dmac_clearintr(io_port) \ 234#define dmac_clearintr(host) writeb(0, (host)->fast + INT_REG)
236 ({ outb(0, (io_port)); })
237 235
238static inline 236static inline unsigned int dmac_address(AS_Host *host)
239unsigned int dmac_address(unsigned int io_port)
240{ 237{
241 return dmac_read(io_port, DMAC_TXADRHI) << 16 | 238 return dmac_read(host, DMAC_TXADRHI) << 16 |
242 dmac_read(io_port, DMAC_TXADRMD) << 8 | 239 dmac_read(host, DMAC_TXADRMD) << 8 |
243 dmac_read(io_port, DMAC_TXADRLO); 240 dmac_read(host, DMAC_TXADRLO);
244} 241}
245 242
246static 243static
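The acornscsi conversion above swaps the old port-I/O style (__raw_readb/outb on computed io_port addresses) for MMIO accessors working from a mapped base plus fixed offsets: writeb(reg, host->base + SBIC_REGIDX) followed by writeb(value, host->base + SBIC_REGVAL) for the WD33C93A indirect register file, and readb/writeb at DMAC_OFFSET + (reg << 2) for the DMA controller. The self-contained sketch below models that index/value register pair with an in-memory "device" so the access pattern is visible; the two offsets match the diff, everything else (the register array, the byte widths) is illustrative.

/* Sketch of an index/value (indirect) register pair behind two MMIO
 * offsets. A byte array stands in for the mapped device; writeb/readb
 * are modeled as plain stores and loads. */
#include <stdio.h>
#include <stdint.h>

#define SBIC_REGIDX 0x2000	/* offsets as in the diff */
#define SBIC_REGVAL 0x2004

static uint8_t mmio[0x4000];	/* pretend this is the ioremapped card */
static uint8_t sbic_regs[32];	/* pretend internal WD33C93A registers */

static void writeb_stub(uint8_t val, size_t off)
{
	mmio[off] = val;
	if (off == SBIC_REGVAL)		/* a value write lands in the */
		sbic_regs[mmio[SBIC_REGIDX] & 0x1f] = val;	/* selected register */
}

static uint8_t readb_stub(size_t off)
{
	if (off == SBIC_REGVAL)
		return sbic_regs[mmio[SBIC_REGIDX] & 0x1f];
	return mmio[off];
}

static void sbic_write(unsigned int reg, uint8_t val)
{
	writeb_stub(reg, SBIC_REGIDX);	/* select register ...	*/
	writeb_stub(val, SBIC_REGVAL);	/* ... then write it	*/
}

static uint8_t sbic_read(unsigned int reg)
{
	writeb_stub(reg, SBIC_REGIDX);
	return readb_stub(SBIC_REGVAL);
}

int main(void)
{
	sbic_write(0x01, 0xaa);		/* illustrative register and value */
	printf("reg 0x01 = %#x\n", sbic_read(0x01));
	return 0;
}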
@@ -248,15 +245,15 @@ void acornscsi_dumpdma(AS_Host *host, char *where)
248{ 245{
249 unsigned int mode, addr, len; 246 unsigned int mode, addr, len;
250 247
251 mode = dmac_read(host->dma.io_port, DMAC_MODECON); 248 mode = dmac_read(host, DMAC_MODECON);
252 addr = dmac_address(host->dma.io_port); 249 addr = dmac_address(host);
253 len = dmac_read(host->dma.io_port, DMAC_TXCNTHI) << 8 | 250 len = dmac_read(host, DMAC_TXCNTHI) << 8 |
254 dmac_read(host->dma.io_port, DMAC_TXCNTLO); 251 dmac_read(host, DMAC_TXCNTLO);
255 252
256 printk("scsi%d: %s: DMAC %02x @%06x+%04x msk %02x, ", 253 printk("scsi%d: %s: DMAC %02x @%06x+%04x msk %02x, ",
257 host->host->host_no, where, 254 host->host->host_no, where,
258 mode, addr, (len + 1) & 0xffff, 255 mode, addr, (len + 1) & 0xffff,
259 dmac_read(host->dma.io_port, DMAC_MASKREG)); 256 dmac_read(host, DMAC_MASKREG));
260 257
261 printk("DMA @%06x, ", host->dma.start_addr); 258 printk("DMA @%06x, ", host->dma.start_addr);
262 printk("BH @%p +%04x, ", host->scsi.SCp.ptr, 259 printk("BH @%p +%04x, ", host->scsi.SCp.ptr,
@@ -272,9 +269,9 @@ unsigned long acornscsi_sbic_xfcount(AS_Host *host)
272{ 269{
273 unsigned long length; 270 unsigned long length;
274 271
275 length = sbic_arm_read(host->scsi.io_port, SBIC_TRANSCNTH) << 16; 272 length = sbic_arm_read(host, SBIC_TRANSCNTH) << 16;
276 length |= sbic_arm_readnext(host->scsi.io_port) << 8; 273 length |= sbic_arm_readnext(host) << 8;
277 length |= sbic_arm_readnext(host->scsi.io_port); 274 length |= sbic_arm_readnext(host);
278 275
279 return length; 276 return length;
280} 277}
@@ -285,7 +282,7 @@ acornscsi_sbic_wait(AS_Host *host, int stat_mask, int stat, int timeout, char *m
285 int asr; 282 int asr;
286 283
287 do { 284 do {
288 asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR); 285 asr = sbic_arm_read(host, SBIC_ASR);
289 286
290 if ((asr & stat_mask) == stat) 287 if ((asr & stat_mask) == stat)
291 return 0; 288 return 0;
@@ -304,7 +301,7 @@ int acornscsi_sbic_issuecmd(AS_Host *host, int command)
304 if (acornscsi_sbic_wait(host, ASR_CIP, 0, 1000, "issuing command")) 301 if (acornscsi_sbic_wait(host, ASR_CIP, 0, 1000, "issuing command"))
305 return -1; 302 return -1;
306 303
307 sbic_arm_write(host->scsi.io_port, SBIC_CMND, command); 304 sbic_arm_write(host, SBIC_CMND, command);
308 305
309 return 0; 306 return 0;
310} 307}
@@ -331,20 +328,20 @@ void acornscsi_resetcard(AS_Host *host)
331 328
332 /* assert reset line */ 329 /* assert reset line */
333 host->card.page_reg = 0x80; 330 host->card.page_reg = 0x80;
334 outb(host->card.page_reg, host->card.io_page); 331 writeb(host->card.page_reg, host->fast + PAGE_REG);
335 332
336 /* wait 3 cs. SCSI standard says 25ms. */ 333 /* wait 3 cs. SCSI standard says 25ms. */
337 acornscsi_csdelay(3); 334 acornscsi_csdelay(3);
338 335
339 host->card.page_reg = 0; 336 host->card.page_reg = 0;
340 outb(host->card.page_reg, host->card.io_page); 337 writeb(host->card.page_reg, host->fast + PAGE_REG);
341 338
342 /* 339 /*
343 * Should get a reset from the card 340 * Should get a reset from the card
344 */ 341 */
345 timeout = 1000; 342 timeout = 1000;
346 do { 343 do {
347 if (inb(host->card.io_intr) & 8) 344 if (readb(host->fast + INT_REG) & 8)
348 break; 345 break;
349 udelay(1); 346 udelay(1);
350 } while (--timeout); 347 } while (--timeout);
@@ -353,19 +350,19 @@ void acornscsi_resetcard(AS_Host *host)
353 printk("scsi%d: timeout while resetting card\n", 350 printk("scsi%d: timeout while resetting card\n",
354 host->host->host_no); 351 host->host->host_no);
355 352
356 sbic_arm_read(host->scsi.io_port, SBIC_ASR); 353 sbic_arm_read(host, SBIC_ASR);
357 sbic_arm_read(host->scsi.io_port, SBIC_SSR); 354 sbic_arm_read(host, SBIC_SSR);
358 355
359 /* setup sbic - WD33C93A */ 356 /* setup sbic - WD33C93A */
360 sbic_arm_write(host->scsi.io_port, SBIC_OWNID, OWNID_EAF | host->host->this_id); 357 sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id);
361 sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_RESET); 358 sbic_arm_write(host, SBIC_CMND, CMND_RESET);
362 359
363 /* 360 /*
364 * Command should cause a reset interrupt 361 * Command should cause a reset interrupt
365 */ 362 */
366 timeout = 1000; 363 timeout = 1000;
367 do { 364 do {
368 if (inb(host->card.io_intr) & 8) 365 if (readb(host->fast + INT_REG) & 8)
369 break; 366 break;
370 udelay(1); 367 udelay(1);
371 } while (--timeout); 368 } while (--timeout);
@@ -374,26 +371,26 @@ void acornscsi_resetcard(AS_Host *host)
374 printk("scsi%d: timeout while resetting card\n", 371 printk("scsi%d: timeout while resetting card\n",
375 host->host->host_no); 372 host->host->host_no);
376 373
377 sbic_arm_read(host->scsi.io_port, SBIC_ASR); 374 sbic_arm_read(host, SBIC_ASR);
378 if (sbic_arm_read(host->scsi.io_port, SBIC_SSR) != 0x01) 375 if (sbic_arm_read(host, SBIC_SSR) != 0x01)
379 printk(KERN_CRIT "scsi%d: WD33C93A didn't give enhanced reset interrupt\n", 376 printk(KERN_CRIT "scsi%d: WD33C93A didn't give enhanced reset interrupt\n",
380 host->host->host_no); 377 host->host->host_no);
381 378
382 sbic_arm_write(host->scsi.io_port, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI); 379 sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI);
383 sbic_arm_write(host->scsi.io_port, SBIC_TIMEOUT, TIMEOUT_TIME); 380 sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME);
384 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA); 381 sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA);
385 sbic_arm_write(host->scsi.io_port, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); 382 sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
386 383
387 host->card.page_reg = 0x40; 384 host->card.page_reg = 0x40;
388 outb(host->card.page_reg, host->card.io_page); 385 writeb(host->card.page_reg, host->fast + PAGE_REG);
389 386
390 /* setup dmac - uPC71071 */ 387 /* setup dmac - uPC71071 */
391 dmac_write(host->dma.io_port, DMAC_INIT, 0); 388 dmac_write(host, DMAC_INIT, 0);
392#ifdef USE_DMAC 389#ifdef USE_DMAC
393 dmac_write(host->dma.io_port, DMAC_INIT, INIT_8BIT); 390 dmac_write(host, DMAC_INIT, INIT_8BIT);
394 dmac_write(host->dma.io_port, DMAC_CHANNEL, CHANNEL_0); 391 dmac_write(host, DMAC_CHANNEL, CHANNEL_0);
395 dmac_write(host->dma.io_port, DMAC_DEVCON0, INIT_DEVCON0); 392 dmac_write(host, DMAC_DEVCON0, INIT_DEVCON0);
396 dmac_write(host->dma.io_port, DMAC_DEVCON1, INIT_DEVCON1); 393 dmac_write(host, DMAC_DEVCON1, INIT_DEVCON1);
397#endif 394#endif
398 395
399 host->SCpnt = NULL; 396 host->SCpnt = NULL;
@@ -741,9 +738,9 @@ intr_ret_t acornscsi_kick(AS_Host *host)
741 * If we have an interrupt pending, then we may have been reselected. 738 * If we have an interrupt pending, then we may have been reselected.
742 * In this case, we don't want to write to the registers 739 * In this case, we don't want to write to the registers
743 */ 740 */
744 if (!(sbic_arm_read(host->scsi.io_port, SBIC_ASR) & (ASR_INT|ASR_BSY|ASR_CIP))) { 741 if (!(sbic_arm_read(host, SBIC_ASR) & (ASR_INT|ASR_BSY|ASR_CIP))) {
745 sbic_arm_write(host->scsi.io_port, SBIC_DESTID, SCpnt->device->id); 742 sbic_arm_write(host, SBIC_DESTID, SCpnt->device->id);
746 sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_SELWITHATN); 743 sbic_arm_write(host, SBIC_CMND, CMND_SELWITHATN);
747 } 744 }
748 745
749 /* 746 /*
@@ -807,7 +804,7 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
807 struct scsi_cmnd *SCpnt = *SCpntp; 804 struct scsi_cmnd *SCpnt = *SCpntp;
808 805
809 /* clean up */ 806 /* clean up */
810 sbic_arm_write(host->scsi.io_port, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); 807 sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
811 808
812 host->stats.fins += 1; 809 host->stats.fins += 1;
813 810
@@ -918,13 +915,13 @@ static
918void acornscsi_data_read(AS_Host *host, char *ptr, 915void acornscsi_data_read(AS_Host *host, char *ptr,
919 unsigned int start_addr, unsigned int length) 916 unsigned int start_addr, unsigned int length)
920{ 917{
921 extern void __acornscsi_in(int port, char *buf, int len); 918 extern void __acornscsi_in(void __iomem *, char *buf, int len);
922 unsigned int page, offset, len = length; 919 unsigned int page, offset, len = length;
923 920
924 page = (start_addr >> 12); 921 page = (start_addr >> 12);
925 offset = start_addr & ((1 << 12) - 1); 922 offset = start_addr & ((1 << 12) - 1);
926 923
927 outb((page & 0x3f) | host->card.page_reg, host->card.io_page); 924 writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
928 925
929 while (len > 0) { 926 while (len > 0) {
930 unsigned int this_len; 927 unsigned int this_len;
@@ -934,7 +931,7 @@ void acornscsi_data_read(AS_Host *host, char *ptr,
934 else 931 else
935 this_len = len; 932 this_len = len;
936 933
937 __acornscsi_in(host->card.io_ram + (offset << 1), ptr, this_len); 934 __acornscsi_in(host->base + (offset << 1), ptr, this_len);
938 935
939 offset += this_len; 936 offset += this_len;
940 ptr += this_len; 937 ptr += this_len;
@@ -943,10 +940,10 @@ void acornscsi_data_read(AS_Host *host, char *ptr,
943 if (offset == (1 << 12)) { 940 if (offset == (1 << 12)) {
944 offset = 0; 941 offset = 0;
945 page ++; 942 page ++;
946 outb((page & 0x3f) | host->card.page_reg, host->card.io_page); 943 writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
947 } 944 }
948 } 945 }
949 outb(host->card.page_reg, host->card.io_page); 946 writeb(host->card.page_reg, host->fast + PAGE_REG);
950} 947}
951 948
952/* 949/*
@@ -963,13 +960,13 @@ static
963void acornscsi_data_write(AS_Host *host, char *ptr, 960void acornscsi_data_write(AS_Host *host, char *ptr,
964 unsigned int start_addr, unsigned int length) 961 unsigned int start_addr, unsigned int length)
965{ 962{
966 extern void __acornscsi_out(int port, char *buf, int len); 963 extern void __acornscsi_out(void __iomem *, char *buf, int len);
967 unsigned int page, offset, len = length; 964 unsigned int page, offset, len = length;
968 965
969 page = (start_addr >> 12); 966 page = (start_addr >> 12);
970 offset = start_addr & ((1 << 12) - 1); 967 offset = start_addr & ((1 << 12) - 1);
971 968
972 outb((page & 0x3f) | host->card.page_reg, host->card.io_page); 969 writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
973 970
974 while (len > 0) { 971 while (len > 0) {
975 unsigned int this_len; 972 unsigned int this_len;
@@ -979,7 +976,7 @@ void acornscsi_data_write(AS_Host *host, char *ptr,
979 else 976 else
980 this_len = len; 977 this_len = len;
981 978
982 __acornscsi_out(host->card.io_ram + (offset << 1), ptr, this_len); 979 __acornscsi_out(host->base + (offset << 1), ptr, this_len);
983 980
984 offset += this_len; 981 offset += this_len;
985 ptr += this_len; 982 ptr += this_len;
@@ -988,10 +985,10 @@ void acornscsi_data_write(AS_Host *host, char *ptr,
988 if (offset == (1 << 12)) { 985 if (offset == (1 << 12)) {
989 offset = 0; 986 offset = 0;
990 page ++; 987 page ++;
991 outb((page & 0x3f) | host->card.page_reg, host->card.io_page); 988 writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
992 } 989 }
993 } 990 }
994 outb(host->card.page_reg, host->card.io_page); 991 writeb(host->card.page_reg, host->fast + PAGE_REG);
995} 992}
996 993
997/* ========================================================================================= 994/* =========================================================================================
@@ -1008,8 +1005,8 @@ void acornscsi_data_write(AS_Host *host, char *ptr,
1008static inline 1005static inline
1009void acornscsi_dma_stop(AS_Host *host) 1006void acornscsi_dma_stop(AS_Host *host)
1010{ 1007{
1011 dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_ON); 1008 dmac_write(host, DMAC_MASKREG, MASK_ON);
1012 dmac_clearintr(host->dma.io_intr_clear); 1009 dmac_clearintr(host);
1013 1010
1014#if (DEBUG & DEBUG_DMA) 1011#if (DEBUG & DEBUG_DMA)
1015 DBG(host->SCpnt, acornscsi_dumpdma(host, "stop")); 1012 DBG(host->SCpnt, acornscsi_dumpdma(host, "stop"));
@@ -1031,7 +1028,7 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
1031 1028
1032 host->dma.direction = direction; 1029 host->dma.direction = direction;
1033 1030
1034 dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_ON); 1031 dmac_write(host, DMAC_MASKREG, MASK_ON);
1035 1032
1036 if (direction == DMA_OUT) { 1033 if (direction == DMA_OUT) {
1037#if (DEBUG & DEBUG_NO_WRITE) 1034#if (DEBUG & DEBUG_NO_WRITE)
@@ -1062,13 +1059,13 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
1062 length); 1059 length);
1063 1060
1064 length -= 1; 1061 length -= 1;
1065 dmac_write(host->dma.io_port, DMAC_TXCNTLO, length); 1062 dmac_write(host, DMAC_TXCNTLO, length);
1066 dmac_write(host->dma.io_port, DMAC_TXCNTHI, length >> 8); 1063 dmac_write(host, DMAC_TXCNTHI, length >> 8);
1067 dmac_write(host->dma.io_port, DMAC_TXADRLO, address); 1064 dmac_write(host, DMAC_TXADRLO, address);
1068 dmac_write(host->dma.io_port, DMAC_TXADRMD, address >> 8); 1065 dmac_write(host, DMAC_TXADRMD, address >> 8);
1069 dmac_write(host->dma.io_port, DMAC_TXADRHI, 0); 1066 dmac_write(host, DMAC_TXADRHI, 0);
1070 dmac_write(host->dma.io_port, DMAC_MODECON, mode); 1067 dmac_write(host, DMAC_MODECON, mode);
1071 dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_OFF); 1068 dmac_write(host, DMAC_MASKREG, MASK_OFF);
1072 1069
1073#if (DEBUG & DEBUG_DMA) 1070#if (DEBUG & DEBUG_DMA)
1074 DBG(host->SCpnt, acornscsi_dumpdma(host, "strt")); 1071 DBG(host->SCpnt, acornscsi_dumpdma(host, "strt"));
@@ -1088,8 +1085,8 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
1088static 1085static
1089void acornscsi_dma_cleanup(AS_Host *host) 1086void acornscsi_dma_cleanup(AS_Host *host)
1090{ 1087{
1091 dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_ON); 1088 dmac_write(host, DMAC_MASKREG, MASK_ON);
1092 dmac_clearintr(host->dma.io_intr_clear); 1089 dmac_clearintr(host);
1093 1090
1094 /* 1091 /*
1095 * Check for a pending transfer 1092 * Check for a pending transfer
@@ -1116,7 +1113,7 @@ void acornscsi_dma_cleanup(AS_Host *host)
1116 /* 1113 /*
1117 * Calculate number of bytes transferred from DMA. 1114 * Calculate number of bytes transferred from DMA.
1118 */ 1115 */
1119 transferred = dmac_address(host->dma.io_port) - host->dma.start_addr; 1116 transferred = dmac_address(host) - host->dma.start_addr;
1120 host->dma.transferred += transferred; 1117 host->dma.transferred += transferred;
1121 1118
1122 if (host->dma.direction == DMA_IN) 1119 if (host->dma.direction == DMA_IN)
@@ -1152,13 +1149,13 @@ void acornscsi_dma_intr(AS_Host *host)
1152 DBG(host->SCpnt, acornscsi_dumpdma(host, "inti")); 1149 DBG(host->SCpnt, acornscsi_dumpdma(host, "inti"));
1153#endif 1150#endif
1154 1151
1155 dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_ON); 1152 dmac_write(host, DMAC_MASKREG, MASK_ON);
1156 dmac_clearintr(host->dma.io_intr_clear); 1153 dmac_clearintr(host);
1157 1154
1158 /* 1155 /*
1159 * Calculate amount transferred via DMA 1156 * Calculate amount transferred via DMA
1160 */ 1157 */
1161 transferred = dmac_address(host->dma.io_port) - host->dma.start_addr; 1158 transferred = dmac_address(host) - host->dma.start_addr;
1162 host->dma.transferred += transferred; 1159 host->dma.transferred += transferred;
1163 1160
1164 /* 1161 /*
@@ -1190,12 +1187,12 @@ void acornscsi_dma_intr(AS_Host *host)
1190 length); 1187 length);
1191 1188
1192 length -= 1; 1189 length -= 1;
1193 dmac_write(host->dma.io_port, DMAC_TXCNTLO, length); 1190 dmac_write(host, DMAC_TXCNTLO, length);
1194 dmac_write(host->dma.io_port, DMAC_TXCNTHI, length >> 8); 1191 dmac_write(host, DMAC_TXCNTHI, length >> 8);
1195 dmac_write(host->dma.io_port, DMAC_TXADRLO, address); 1192 dmac_write(host, DMAC_TXADRLO, address);
1196 dmac_write(host->dma.io_port, DMAC_TXADRMD, address >> 8); 1193 dmac_write(host, DMAC_TXADRMD, address >> 8);
1197 dmac_write(host->dma.io_port, DMAC_TXADRHI, 0); 1194 dmac_write(host, DMAC_TXADRHI, 0);
1198 dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_OFF); 1195 dmac_write(host, DMAC_MASKREG, MASK_OFF);
1199 1196
1200#if (DEBUG & DEBUG_DMA) 1197#if (DEBUG & DEBUG_DMA)
1201 DBG(host->SCpnt, acornscsi_dumpdma(host, "into")); 1198 DBG(host->SCpnt, acornscsi_dumpdma(host, "into"));
@@ -1209,15 +1206,15 @@ void acornscsi_dma_intr(AS_Host *host)
1209 * attention condition. We continue giving one byte until 1206 * attention condition. We continue giving one byte until
1210 * the device recognises the attention. 1207 * the device recognises the attention.
1211 */ 1208 */
1212 if (dmac_read(host->dma.io_port, DMAC_STATUS) & STATUS_RQ0) { 1209 if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
1213 acornscsi_abortcmd(host, host->SCpnt->tag); 1210 acornscsi_abortcmd(host, host->SCpnt->tag);
1214 1211
1215 dmac_write(host->dma.io_port, DMAC_TXCNTLO, 0); 1212 dmac_write(host, DMAC_TXCNTLO, 0);
1216 dmac_write(host->dma.io_port, DMAC_TXCNTHI, 0); 1213 dmac_write(host, DMAC_TXCNTHI, 0);
1217 dmac_write(host->dma.io_port, DMAC_TXADRLO, 0); 1214 dmac_write(host, DMAC_TXADRLO, 0);
1218 dmac_write(host->dma.io_port, DMAC_TXADRMD, 0); 1215 dmac_write(host, DMAC_TXADRMD, 0);
1219 dmac_write(host->dma.io_port, DMAC_TXADRHI, 0); 1216 dmac_write(host, DMAC_TXADRHI, 0);
1220 dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_OFF); 1217 dmac_write(host, DMAC_MASKREG, MASK_OFF);
1221 } 1218 }
1222#endif 1219#endif
1223 } 1220 }
@@ -1271,9 +1268,9 @@ void acornscsi_dma_adjust(AS_Host *host)
1271 host->dma.xfer_setup = 0; 1268 host->dma.xfer_setup = 0;
1272 else { 1269 else {
1273 transferred += host->dma.start_addr; 1270 transferred += host->dma.start_addr;
1274 dmac_write(host->dma.io_port, DMAC_TXADRLO, transferred); 1271 dmac_write(host, DMAC_TXADRLO, transferred);
1275 dmac_write(host->dma.io_port, DMAC_TXADRMD, transferred >> 8); 1272 dmac_write(host, DMAC_TXADRMD, transferred >> 8);
1276 dmac_write(host->dma.io_port, DMAC_TXADRHI, transferred >> 16); 1273 dmac_write(host, DMAC_TXADRHI, transferred >> 16);
1277#if (DEBUG & (DEBUG_DMA|DEBUG_WRITE)) 1274#if (DEBUG & (DEBUG_DMA|DEBUG_WRITE))
1278 DBG(host->SCpnt, acornscsi_dumpdma(host, "adjo")); 1275 DBG(host->SCpnt, acornscsi_dumpdma(host, "adjo"));
1279#endif 1276#endif
@@ -1292,12 +1289,12 @@ acornscsi_write_pio(AS_Host *host, char *bytes, int *ptr, int len, unsigned int
1292 int my_ptr = *ptr; 1289 int my_ptr = *ptr;
1293 1290
1294 while (my_ptr < len) { 1291 while (my_ptr < len) {
1295 asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR); 1292 asr = sbic_arm_read(host, SBIC_ASR);
1296 1293
1297 if (asr & ASR_DBR) { 1294 if (asr & ASR_DBR) {
1298 timeout = max_timeout; 1295 timeout = max_timeout;
1299 1296
1300 sbic_arm_write(host->scsi.io_port, SBIC_DATA, bytes[my_ptr++]); 1297 sbic_arm_write(host, SBIC_DATA, bytes[my_ptr++]);
1301 } else if (asr & ASR_INT) 1298 } else if (asr & ASR_INT)
1302 break; 1299 break;
1303 else if (--timeout == 0) 1300 else if (--timeout == 0)
@@ -1320,9 +1317,9 @@ acornscsi_sendcommand(AS_Host *host)
1320{ 1317{
1321 struct scsi_cmnd *SCpnt = host->SCpnt; 1318 struct scsi_cmnd *SCpnt = host->SCpnt;
1322 1319
1323 sbic_arm_write(host->scsi.io_port, SBIC_TRANSCNTH, 0); 1320 sbic_arm_write(host, SBIC_TRANSCNTH, 0);
1324 sbic_arm_writenext(host->scsi.io_port, 0); 1321 sbic_arm_writenext(host, 0);
1325 sbic_arm_writenext(host->scsi.io_port, SCpnt->cmd_len - host->scsi.SCp.sent_command); 1322 sbic_arm_writenext(host, SCpnt->cmd_len - host->scsi.SCp.sent_command);
1326 1323
1327 acornscsi_sbic_issuecmd(host, CMND_XFERINFO); 1324 acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
1328 1325
@@ -1351,7 +1348,7 @@ void acornscsi_sendmessage(AS_Host *host)
1351 1348
1352 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 1"); 1349 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 1");
1353 1350
1354 sbic_arm_write(host->scsi.io_port, SBIC_DATA, NOP); 1351 sbic_arm_write(host, SBIC_DATA, NOP);
1355 1352
1356 host->scsi.last_message = NOP; 1353 host->scsi.last_message = NOP;
1357#if (DEBUG & DEBUG_MESSAGES) 1354#if (DEBUG & DEBUG_MESSAGES)
@@ -1365,7 +1362,7 @@ void acornscsi_sendmessage(AS_Host *host)
1365 1362
1366 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 2"); 1363 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 2");
1367 1364
1368 sbic_arm_write(host->scsi.io_port, SBIC_DATA, msg->msg[0]); 1365 sbic_arm_write(host, SBIC_DATA, msg->msg[0]);
1369 1366
1370 host->scsi.last_message = msg->msg[0]; 1367 host->scsi.last_message = msg->msg[0];
1371#if (DEBUG & DEBUG_MESSAGES) 1368#if (DEBUG & DEBUG_MESSAGES)
@@ -1382,9 +1379,9 @@ void acornscsi_sendmessage(AS_Host *host)
1382 * initiator. This provides an interlock so that the 1379 * initiator. This provides an interlock so that the
1383 * initiator can determine which message byte is rejected. 1380 * initiator can determine which message byte is rejected.
1384 */ 1381 */
1385 sbic_arm_write(host->scsi.io_port, SBIC_TRANSCNTH, 0); 1382 sbic_arm_write(host, SBIC_TRANSCNTH, 0);
1386 sbic_arm_writenext(host->scsi.io_port, 0); 1383 sbic_arm_writenext(host, 0);
1387 sbic_arm_writenext(host->scsi.io_port, message_length); 1384 sbic_arm_writenext(host, message_length);
1388 acornscsi_sbic_issuecmd(host, CMND_XFERINFO); 1385 acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
1389 1386
1390 msgnr = 0; 1387 msgnr = 0;
@@ -1421,7 +1418,7 @@ void acornscsi_readstatusbyte(AS_Host *host)
1421{ 1418{
1422 acornscsi_sbic_issuecmd(host, CMND_XFERINFO|CMND_SBT); 1419 acornscsi_sbic_issuecmd(host, CMND_XFERINFO|CMND_SBT);
1423 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "reading status byte"); 1420 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "reading status byte");
1424 host->scsi.SCp.Status = sbic_arm_read(host->scsi.io_port, SBIC_DATA); 1421 host->scsi.SCp.Status = sbic_arm_read(host, SBIC_DATA);
1425} 1422}
1426 1423
1427/* 1424/*
@@ -1438,12 +1435,12 @@ unsigned char acornscsi_readmessagebyte(AS_Host *host)
1438 1435
1439 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "for message byte"); 1436 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "for message byte");
1440 1437
1441 message = sbic_arm_read(host->scsi.io_port, SBIC_DATA); 1438 message = sbic_arm_read(host, SBIC_DATA);
1442 1439
1443 /* wait for MSGIN-XFER-PAUSED */ 1440 /* wait for MSGIN-XFER-PAUSED */
1444 acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after message byte"); 1441 acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after message byte");
1445 1442
1446 sbic_arm_read(host->scsi.io_port, SBIC_SSR); 1443 sbic_arm_read(host, SBIC_SSR);
1447 1444
1448 return message; 1445 return message;
1449} 1446}
@@ -1480,7 +1477,7 @@ void acornscsi_message(AS_Host *host)
1480 1477
1481 /* wait for next msg-in */ 1478 /* wait for next msg-in */
1482 acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after negate ack"); 1479 acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after negate ack");
1483 sbic_arm_read(host->scsi.io_port, SBIC_SSR); 1480 sbic_arm_read(host, SBIC_SSR);
1484 } 1481 }
1485 } while (msgidx < msglen); 1482 } while (msgidx < msglen);
1486 1483
@@ -1602,7 +1599,7 @@ void acornscsi_message(AS_Host *host)
1602 host->host->host_no, acornscsi_target(host)); 1599 host->host->host_no, acornscsi_target(host));
1603 host->device[host->SCpnt->device->id].sync_xfer = SYNCHTRANSFER_2DBA; 1600 host->device[host->SCpnt->device->id].sync_xfer = SYNCHTRANSFER_2DBA;
1604 host->device[host->SCpnt->device->id].sync_state = SYNC_ASYNCHRONOUS; 1601 host->device[host->SCpnt->device->id].sync_state = SYNC_ASYNCHRONOUS;
1605 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); 1602 sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
1606 break; 1603 break;
1607 1604
1608 default: 1605 default:
@@ -1652,7 +1649,7 @@ void acornscsi_message(AS_Host *host)
1652 host->device[host->SCpnt->device->id].sync_xfer = 1649 host->device[host->SCpnt->device->id].sync_xfer =
1653 calc_sync_xfer(period * 4, length); 1650 calc_sync_xfer(period * 4, length);
1654 } 1651 }
1655 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); 1652 sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
1656 break; 1653 break;
1657#else 1654#else
1658 /* We do not accept synchronous transfers. Respond with a 1655 /* We do not accept synchronous transfers. Respond with a
@@ -1792,10 +1789,10 @@ int acornscsi_starttransfer(AS_Host *host)
1792 1789
1793 residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred; 1790 residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred;
1794 1791
1795 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); 1792 sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
1796 sbic_arm_writenext(host->scsi.io_port, residual >> 16); 1793 sbic_arm_writenext(host, residual >> 16);
1797 sbic_arm_writenext(host->scsi.io_port, residual >> 8); 1794 sbic_arm_writenext(host, residual >> 8);
1798 sbic_arm_writenext(host->scsi.io_port, residual); 1795 sbic_arm_writenext(host, residual);
1799 acornscsi_sbic_issuecmd(host, CMND_XFERINFO); 1796 acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
1800 return 1; 1797 return 1;
1801} 1798}
@@ -1816,7 +1813,7 @@ int acornscsi_reconnect(AS_Host *host)
1816{ 1813{
1817 unsigned int target, lun, ok = 0; 1814 unsigned int target, lun, ok = 0;
1818 1815
1819 target = sbic_arm_read(host->scsi.io_port, SBIC_SOURCEID); 1816 target = sbic_arm_read(host, SBIC_SOURCEID);
1820 1817
1821 if (!(target & 8)) 1818 if (!(target & 8))
1822 printk(KERN_ERR "scsi%d: invalid source id after reselection " 1819 printk(KERN_ERR "scsi%d: invalid source id after reselection "
@@ -1832,7 +1829,7 @@ int acornscsi_reconnect(AS_Host *host)
1832 host->SCpnt = NULL; 1829 host->SCpnt = NULL;
1833 } 1830 }
1834 1831
1835 lun = sbic_arm_read(host->scsi.io_port, SBIC_DATA) & 7; 1832 lun = sbic_arm_read(host, SBIC_DATA) & 7;
1836 1833
1837 host->scsi.reconnected.target = target; 1834 host->scsi.reconnected.target = target;
1838 host->scsi.reconnected.lun = lun; 1835 host->scsi.reconnected.lun = lun;
@@ -1952,7 +1949,7 @@ static
1952void acornscsi_abortcmd(AS_Host *host, unsigned char tag) 1949void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
1953{ 1950{
1954 host->scsi.phase = PHASE_ABORTED; 1951 host->scsi.phase = PHASE_ABORTED;
1955 sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_ASSERTATN); 1952 sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
1956 1953
1957 msgqueue_flush(&host->scsi.msgs); 1954 msgqueue_flush(&host->scsi.msgs);
1958#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 1955#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
@@ -1979,11 +1976,11 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
1979{ 1976{
1980 unsigned int asr, ssr; 1977 unsigned int asr, ssr;
1981 1978
1982 asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR); 1979 asr = sbic_arm_read(host, SBIC_ASR);
1983 if (!(asr & ASR_INT)) 1980 if (!(asr & ASR_INT))
1984 return INTR_IDLE; 1981 return INTR_IDLE;
1985 1982
1986 ssr = sbic_arm_read(host->scsi.io_port, SBIC_SSR); 1983 ssr = sbic_arm_read(host, SBIC_SSR);
1987 1984
1988#if (DEBUG & DEBUG_PHASES) 1985#if (DEBUG & DEBUG_PHASES)
1989 print_sbic_status(asr, ssr, host->scsi.phase); 1986 print_sbic_status(asr, ssr, host->scsi.phase);
@@ -1999,15 +1996,15 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
1999 printk(KERN_ERR "scsi%d: reset in standard mode but wanted advanced mode.\n", 1996 printk(KERN_ERR "scsi%d: reset in standard mode but wanted advanced mode.\n",
2000 host->host->host_no); 1997 host->host->host_no);
2001 /* setup sbic - WD33C93A */ 1998 /* setup sbic - WD33C93A */
2002 sbic_arm_write(host->scsi.io_port, SBIC_OWNID, OWNID_EAF | host->host->this_id); 1999 sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id);
2003 sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_RESET); 2000 sbic_arm_write(host, SBIC_CMND, CMND_RESET);
2004 return INTR_IDLE; 2001 return INTR_IDLE;
2005 2002
2006 case 0x01: /* reset state - advanced */ 2003 case 0x01: /* reset state - advanced */
2007 sbic_arm_write(host->scsi.io_port, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI); 2004 sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI);
2008 sbic_arm_write(host->scsi.io_port, SBIC_TIMEOUT, TIMEOUT_TIME); 2005 sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME);
2009 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA); 2006 sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA);
2010 sbic_arm_write(host->scsi.io_port, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); 2007 sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
2011 msgqueue_flush(&host->scsi.msgs); 2008 msgqueue_flush(&host->scsi.msgs);
2012 return INTR_IDLE; 2009 return INTR_IDLE;
2013 2010
@@ -2025,10 +2022,10 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2025 msgqueue_flush(&host->scsi.msgs); 2022 msgqueue_flush(&host->scsi.msgs);
2026 host->dma.transferred = host->scsi.SCp.scsi_xferred; 2023 host->dma.transferred = host->scsi.SCp.scsi_xferred;
2027 /* 33C93 gives next interrupt indicating bus phase */ 2024 /* 33C93 gives next interrupt indicating bus phase */
2028 asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR); 2025 asr = sbic_arm_read(host, SBIC_ASR);
2029 if (!(asr & ASR_INT)) 2026 if (!(asr & ASR_INT))
2030 break; 2027 break;
2031 ssr = sbic_arm_read(host->scsi.io_port, SBIC_SSR); 2028 ssr = sbic_arm_read(host, SBIC_SSR);
2032 ADD_STATUS(8, ssr, host->scsi.phase, 1); 2029 ADD_STATUS(8, ssr, host->scsi.phase, 1);
2033 ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, 1); 2030 ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, 1);
2034 goto connected; 2031 goto connected;
@@ -2476,11 +2473,11 @@ acornscsi_intr(int irq, void *dev_id)
2476 do { 2473 do {
2477 ret = INTR_IDLE; 2474 ret = INTR_IDLE;
2478 2475
2479 iostatus = inb(host->card.io_intr); 2476 iostatus = readb(host->fast + INT_REG);
2480 2477
2481 if (iostatus & 2) { 2478 if (iostatus & 2) {
2482 acornscsi_dma_intr(host); 2479 acornscsi_dma_intr(host);
2483 iostatus = inb(host->card.io_intr); 2480 iostatus = readb(host->fast + INT_REG);
2484 } 2481 }
2485 2482
2486 if (iostatus & 8) 2483 if (iostatus & 8)
@@ -2655,7 +2652,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
2655 * busylun bit. 2652 * busylun bit.
2656 */ 2653 */
2657 case PHASE_CONNECTED: 2654 case PHASE_CONNECTED:
2658 sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_DISCONNECT); 2655 sbic_arm_write(host, SBIC_CMND, CMND_DISCONNECT);
2659 host->SCpnt = NULL; 2656 host->SCpnt = NULL;
2660 res = res_success_clear; 2657 res = res_success_clear;
2661 break; 2658 break;
@@ -2699,8 +2696,8 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
2699#if (DEBUG & DEBUG_ABORT) 2696#if (DEBUG & DEBUG_ABORT)
2700 { 2697 {
2701 int asr, ssr; 2698 int asr, ssr;
2702 asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR); 2699 asr = sbic_arm_read(host, SBIC_ASR);
2703 ssr = sbic_arm_read(host->scsi.io_port, SBIC_SSR); 2700 ssr = sbic_arm_read(host, SBIC_SSR);
2704 2701
2705 printk(KERN_WARNING "acornscsi_abort: "); 2702 printk(KERN_WARNING "acornscsi_abort: ");
2706 print_sbic_status(asr, ssr, host->scsi.phase); 2703 print_sbic_status(asr, ssr, host->scsi.phase);
@@ -2731,9 +2728,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
2731//#if (DEBUG & DEBUG_ABORT) 2728//#if (DEBUG & DEBUG_ABORT)
2732 printk("success\n"); 2729 printk("success\n");
2733//#endif 2730//#endif
2734 SCpnt->result = DID_ABORT << 16; 2731 result = SUCCESS;
2735 SCpnt->scsi_done(SCpnt);
2736 result = SCSI_ABORT_SUCCESS;
2737 break; 2732 break;
2738 2733
2739 /* 2734 /*
@@ -2745,7 +2740,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
2745//#if (DEBUG & DEBUG_ABORT) 2740//#if (DEBUG & DEBUG_ABORT)
2746 printk("snooze\n"); 2741 printk("snooze\n");
2747//#endif 2742//#endif
2748 result = SCSI_ABORT_SNOOZE; 2743 result = FAILED;
2749 break; 2744 break;
2750 2745
2751 /* 2746 /*
@@ -2755,11 +2750,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
2755 default: 2750 default:
2756 case res_not_running: 2751 case res_not_running:
2757 acornscsi_dumplog(host, SCpnt->device->id); 2752 acornscsi_dumplog(host, SCpnt->device->id);
2758#if (DEBUG & DEBUG_ABORT) 2753 result = FAILED;
2759 result = SCSI_ABORT_SNOOZE;
2760#else
2761 result = SCSI_ABORT_NOT_RUNNING;
2762#endif
2763//#if (DEBUG & DEBUG_ABORT) 2754//#if (DEBUG & DEBUG_ABORT)
2764 printk("not running\n"); 2755 printk("not running\n");
2765//#endif 2756//#endif
@@ -2770,13 +2761,12 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
2770} 2761}
2771 2762
2772/* 2763/*
2773 * Prototype: int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags) 2764 * Prototype: int acornscsi_reset(struct scsi_cmnd *SCpnt)
2774 * Purpose : reset a command on this host/reset this host 2765 * Purpose : reset a command on this host/reset this host
2775 * Params : SCpnt - command causing reset 2766 * Params : SCpnt - command causing reset
2776 * result - what type of reset to perform
2777 * Returns : one of SCSI_RESET_ macros 2767 * Returns : one of SCSI_RESET_ macros
2778 */ 2768 */
2779int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags) 2769int acornscsi_bus_reset(struct scsi_cmnd *SCpnt)
2780{ 2770{
2781 AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; 2771 AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
2782 struct scsi_cmnd *SCptr; 2772 struct scsi_cmnd *SCptr;
@@ -2787,8 +2777,8 @@ int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags)
2787 { 2777 {
2788 int asr, ssr; 2778 int asr, ssr;
2789 2779
2790 asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR); 2780 asr = sbic_arm_read(host, SBIC_ASR);
2791 ssr = sbic_arm_read(host->scsi.io_port, SBIC_SSR); 2781 ssr = sbic_arm_read(host, SBIC_SSR);
2792 2782
2793 printk(KERN_WARNING "acornscsi_reset: "); 2783 printk(KERN_WARNING "acornscsi_reset: ");
2794 print_sbic_status(asr, ssr, host->scsi.phase); 2784 print_sbic_status(asr, ssr, host->scsi.phase);
@@ -2798,28 +2788,16 @@ int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags)
2798 2788
2799 acornscsi_dma_stop(host); 2789 acornscsi_dma_stop(host);
2800 2790
2801 SCptr = host->SCpnt;
2802
2803 /* 2791 /*
2804 * do hard reset. This resets all devices on this host, and so we 2792 * do hard reset. This resets all devices on this host, and so we
2805 * must set the reset status on all commands. 2793 * must set the reset status on all commands.
2806 */ 2794 */
2807 acornscsi_resetcard(host); 2795 acornscsi_resetcard(host);
2808 2796
2809 /*
2810 * report reset on commands current connected/disconnected
2811 */
2812 acornscsi_reportstatus(&host->SCpnt, &SCptr, DID_RESET);
2813
2814 while ((SCptr = queue_remove(&host->queues.disconnected)) != NULL) 2797 while ((SCptr = queue_remove(&host->queues.disconnected)) != NULL)
2815 acornscsi_reportstatus(&SCptr, &SCpnt, DID_RESET); 2798 ;
2816
2817 if (SCpnt) {
2818 SCpnt->result = DID_RESET << 16;
2819 SCpnt->scsi_done(SCpnt);
2820 }
2821 2799
2822 return SCSI_RESET_BUS_RESET | SCSI_RESET_HOST_RESET | SCSI_RESET_SUCCESS; 2800 return SUCCESS;
2823} 2801}
2824 2802
2825/*============================================================================================== 2803/*==============================================================================================
@@ -2850,7 +2828,7 @@ char *acornscsi_info(struct Scsi_Host *host)
2850 " LINK" 2828 " LINK"
2851#endif 2829#endif
2852#if (DEBUG & DEBUG_NO_WRITE) 2830#if (DEBUG & DEBUG_NO_WRITE)
2853 " NOWRITE ("NO_WRITE_STR")" 2831 " NOWRITE (" __stringify(NO_WRITE) ")"
2854#endif 2832#endif
2855 , host->hostt->name, host->io_port, host->irq, 2833 , host->hostt->name, host->io_port, host->irq,
2856 VER_MAJOR, VER_MINOR, VER_PATCH); 2834 VER_MAJOR, VER_MINOR, VER_PATCH);
@@ -2881,15 +2859,15 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start,
2881 " LINK" 2859 " LINK"
2882#endif 2860#endif
2883#if (DEBUG & DEBUG_NO_WRITE) 2861#if (DEBUG & DEBUG_NO_WRITE)
2884 " NOWRITE ("NO_WRITE_STR")" 2862 " NOWRITE (" __stringify(NO_WRITE) ")"
2885#endif 2863#endif
2886 "\n\n", VER_MAJOR, VER_MINOR, VER_PATCH); 2864 "\n\n", VER_MAJOR, VER_MINOR, VER_PATCH);
2887 2865
2888 p += sprintf(p, "SBIC: WD33C93A Address: %08X IRQ : %d\n", 2866 p += sprintf(p, "SBIC: WD33C93A Address: %p IRQ : %d\n",
2889 host->scsi.io_port, host->scsi.irq); 2867 host->base + SBIC_REGIDX, host->scsi.irq);
2890#ifdef USE_DMAC 2868#ifdef USE_DMAC
2891 p += sprintf(p, "DMAC: uPC71071 Address: %08X IRQ : %d\n\n", 2869 p += sprintf(p, "DMAC: uPC71071 Address: %p IRQ : %d\n\n",
2892 host->dma.io_port, host->scsi.irq); 2870 host->base + DMAC_OFFSET, host->scsi.irq);
2893#endif 2871#endif
2894 2872
2895 p += sprintf(p, "Statistics:\n" 2873 p += sprintf(p, "Statistics:\n"
@@ -2976,9 +2954,8 @@ static struct scsi_host_template acornscsi_template = {
2976 .name = "AcornSCSI", 2954 .name = "AcornSCSI",
2977 .info = acornscsi_info, 2955 .info = acornscsi_info,
2978 .queuecommand = acornscsi_queuecmd, 2956 .queuecommand = acornscsi_queuecmd,
2979#warning fixme 2957 .eh_abort_handler = acornscsi_abort,
2980 .abort = acornscsi_abort, 2958 .eh_bus_reset_handler = acornscsi_bus_reset,
2981 .reset = acornscsi_reset,
2982 .can_queue = 16, 2959 .can_queue = 16,
2983 .this_id = 7, 2960 .this_id = 7,
2984 .sg_tablesize = SG_ALL, 2961 .sg_tablesize = SG_ALL,
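The hunk above moves the driver off the obsolete .abort/.reset template hooks and onto the midlayer error-handling callbacks, .eh_abort_handler and .eh_bus_reset_handler, which return SUCCESS or FAILED instead of the old SCSI_ABORT_*/SCSI_RESET_* codes. A minimal sketch of the shape such handlers take — the mydrv_* names are placeholders, not part of this patch, and the other mandatory template fields are omitted:

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    static int mydrv_eh_abort(struct scsi_cmnd *cmd)
    {
        /* try to stop 'cmd' in the hardware here */
        return SUCCESS;         /* FAILED if the command could not be aborted */
    }

    static int mydrv_eh_bus_reset(struct scsi_cmnd *cmd)
    {
        /* reset the bus; the midlayer requeues the affected commands */
        return SUCCESS;
    }

    static struct scsi_host_template mydrv_template = {
        .name                  = "mydrv",
        /* .queuecommand and the other required hooks omitted for brevity */
        .eh_abort_handler      = mydrv_eh_abort,
        .eh_bus_reset_handler  = mydrv_eh_bus_reset,
        .can_queue             = 1,
        .this_id               = 7,
        .sg_tablesize          = SG_ALL,
    };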
@@ -2992,48 +2969,37 @@ acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
2992{ 2969{
2993 struct Scsi_Host *host; 2970 struct Scsi_Host *host;
2994 AS_Host *ashost; 2971 AS_Host *ashost;
2995 int ret = -ENOMEM; 2972 int ret;
2996 2973
2997 host = scsi_host_alloc(&acornscsi_template, sizeof(AS_Host)); 2974 ret = ecard_request_resources(ec);
2998 if (!host) 2975 if (ret)
2999 goto out; 2976 goto out;
3000 2977
2978 host = scsi_host_alloc(&acornscsi_template, sizeof(AS_Host));
2979 if (!host) {
2980 ret = -ENOMEM;
2981 goto out_release;
2982 }
2983
3001 ashost = (AS_Host *)host->hostdata; 2984 ashost = (AS_Host *)host->hostdata;
3002 2985
3003 host->io_port = ecard_address(ec, ECARD_MEMC, 0); 2986 ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
3004 host->irq = ec->irq; 2987 ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
2988 if (!ashost->base || !ashost->fast)
2989 goto out_put;
3005 2990
3006 ashost->host = host; 2991 host->irq = ec->irq;
3007 ashost->scsi.io_port = ioaddr(host->io_port + 0x800); 2992 ashost->host = host;
3008 ashost->scsi.irq = host->irq; 2993 ashost->scsi.irq = host->irq;
3009 ashost->card.io_intr = POD_SPACE(host->io_port) + 0x800;
3010 ashost->card.io_page = POD_SPACE(host->io_port) + 0xc00;
3011 ashost->card.io_ram = ioaddr(host->io_port);
3012 ashost->dma.io_port = host->io_port + 0xc00;
3013 ashost->dma.io_intr_clear = POD_SPACE(host->io_port) + 0x800;
3014 2994
3015 ec->irqaddr = (char *)ioaddr(ashost->card.io_intr); 2995 ec->irqaddr = ashost->fast + INT_REG;
3016 ec->irqmask = 0x0a; 2996 ec->irqmask = 0x0a;
3017 2997
3018 ret = -EBUSY;
3019 if (!request_region(host->io_port + 0x800, 2, "acornscsi(sbic)"))
3020 goto err_1;
3021 if (!request_region(ashost->card.io_intr, 1, "acornscsi(intr)"))
3022 goto err_2;
3023 if (!request_region(ashost->card.io_page, 1, "acornscsi(page)"))
3024 goto err_3;
3025#ifdef USE_DMAC
3026 if (!request_region(ashost->dma.io_port, 256, "acornscsi(dmac)"))
3027 goto err_4;
3028#endif
3029 if (!request_region(host->io_port, 2048, "acornscsi(ram)"))
3030 goto err_5;
3031
3032 ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost); 2998 ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
3033 if (ret) { 2999 if (ret) {
3034 printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n", 3000 printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
3035 host->host_no, ashost->scsi.irq, ret); 3001 host->host_no, ashost->scsi.irq, ret);
3036 goto err_6; 3002 goto out_put;
3037 } 3003 }
3038 3004
3039 memset(&ashost->stats, 0, sizeof (ashost->stats)); 3005 memset(&ashost->stats, 0, sizeof (ashost->stats));
@@ -3045,27 +3011,22 @@ acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
3045 3011
3046 ret = scsi_add_host(host, &ec->dev); 3012 ret = scsi_add_host(host, &ec->dev);
3047 if (ret) 3013 if (ret)
3048 goto err_7; 3014 goto out_irq;
3049 3015
3050 scsi_scan_host(host); 3016 scsi_scan_host(host);
3051 goto out; 3017 goto out;
3052 3018
3053 err_7: 3019 out_irq:
3054 free_irq(host->irq, ashost); 3020 free_irq(host->irq, ashost);
3055 err_6: 3021 msgqueue_free(&ashost->scsi.msgs);
3056 release_region(host->io_port, 2048); 3022 queue_free(&ashost->queues.disconnected);
3057 err_5: 3023 queue_free(&ashost->queues.issue);
3058#ifdef USE_DMAC 3024 out_put:
3059 release_region(ashost->dma.io_port, 256); 3025 ecardm_iounmap(ec, ashost->fast);
3060#endif 3026 ecardm_iounmap(ec, ashost->base);
3061 err_4:
3062 release_region(ashost->card.io_page, 1);
3063 err_3:
3064 release_region(ashost->card.io_intr, 1);
3065 err_2:
3066 release_region(host->io_port + 0x800, 2);
3067 err_1:
3068 scsi_host_put(host); 3027 scsi_host_put(host);
3028 out_release:
3029 ecard_release_resources(ec);
3069 out: 3030 out:
3070 return ret; 3031 return ret;
3071} 3032}
@@ -3081,20 +3042,17 @@ static void __devexit acornscsi_remove(struct expansion_card *ec)
3081 /* 3042 /*
3082 * Put card into RESET state 3043 * Put card into RESET state
3083 */ 3044 */
3084 outb(0x80, ashost->card.io_page); 3045 writeb(0x80, ashost->fast + PAGE_REG);
3085 3046
3086 free_irq(host->irq, ashost); 3047 free_irq(host->irq, ashost);
3087 3048
3088 release_region(host->io_port + 0x800, 2);
3089 release_region(ashost->card.io_intr, 1);
3090 release_region(ashost->card.io_page, 1);
3091 release_region(ashost->dma.io_port, 256);
3092 release_region(host->io_port, 2048);
3093
3094 msgqueue_free(&ashost->scsi.msgs); 3049 msgqueue_free(&ashost->scsi.msgs);
3095 queue_free(&ashost->queues.disconnected); 3050 queue_free(&ashost->queues.disconnected);
3096 queue_free(&ashost->queues.issue); 3051 queue_free(&ashost->queues.issue);
3052 ecardm_iounmap(ec, ashost->fast);
3053 ecardm_iounmap(ec, ashost->base);
3097 scsi_host_put(host); 3054 scsi_host_put(host);
3055 ecard_release_resources(ec);
3098} 3056}
3099 3057
3100static const struct ecard_id acornscsi_cids[] = { 3058static const struct ecard_id acornscsi_cids[] = {
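Taken together, the probe and remove hunks above drop the ioaddr()/request_region() arithmetic in favour of the managed expansion-card API: the card's resources are claimed once with ecard_request_resources(), the MEMC and fast IOC windows are mapped with ecardm_iomap(), and every register access becomes a readb()/writeb() on the resulting __iomem cookies. A trimmed sketch of that probe pattern, with a placeholder mydrv_* name and simplified error handling (the real driver stores the cookies in its host data):

    #include <linux/io.h>
    #include <asm/ecard.h>

    static int mydrv_probe(struct expansion_card *ec, const struct ecard_id *id)
    {
        void __iomem *base, *fast;
        int ret;

        ret = ecard_request_resources(ec);      /* claim the card's resources */
        if (ret)
            return ret;

        base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
        fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
        if (!base || !fast) {
            ret = -ENOMEM;
            goto out_release;
        }

        /* from here on, register access is readb()/writeb() on these cookies,
         * e.g. writeb(page, fast + PAGE_REG) as in the hunks above */
        return 0;

    out_release:
        if (fast)
            ecardm_iounmap(ec, fast);
        if (base)
            ecardm_iounmap(ec, base);
        ecard_release_resources(ec);
        return ret;
    }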
diff --git a/drivers/scsi/arm/acornscsi.h b/drivers/scsi/arm/acornscsi.h
index d11424b89f42..8d2172a0b351 100644
--- a/drivers/scsi/arm/acornscsi.h
+++ b/drivers/scsi/arm/acornscsi.h
@@ -179,7 +179,6 @@
179 179
180/* miscellaneous internal variables */ 180/* miscellaneous internal variables */
181 181
182#define POD_SPACE(x) ((x) + 0xd0000)
183#define MASK_ON (MASKREG_M3|MASKREG_M2|MASKREG_M1|MASKREG_M0) 182#define MASK_ON (MASKREG_M3|MASKREG_M2|MASKREG_M1|MASKREG_M0)
184#define MASK_OFF (MASKREG_M3|MASKREG_M2|MASKREG_M1) 183#define MASK_OFF (MASKREG_M3|MASKREG_M2|MASKREG_M1)
185 184
@@ -279,10 +278,11 @@ typedef struct acornscsi_hostdata {
279 struct Scsi_Host *host; /* host */ 278 struct Scsi_Host *host; /* host */
280 struct scsi_cmnd *SCpnt; /* currently processing command */ 279 struct scsi_cmnd *SCpnt; /* currently processing command */
281 struct scsi_cmnd *origSCpnt; /* original connecting command */ 280 struct scsi_cmnd *origSCpnt; /* original connecting command */
281 void __iomem *base; /* memc base address */
282 void __iomem *fast; /* fast ioc base address */
282 283
283 /* driver information */ 284 /* driver information */
284 struct { 285 struct {
285 unsigned int io_port; /* base address of WD33C93 */
286 unsigned int irq; /* interrupt */ 286 unsigned int irq; /* interrupt */
287 phase_t phase; /* current phase */ 287 phase_t phase; /* current phase */
288 288
@@ -329,8 +329,6 @@ typedef struct acornscsi_hostdata {
329 329
330 /* DMA info */ 330 /* DMA info */
331 struct { 331 struct {
332 unsigned int io_port; /* base address of DMA controller */
333 unsigned int io_intr_clear; /* address of DMA interrupt clear */
334 unsigned int free_addr; /* next free address */ 332 unsigned int free_addr; /* next free address */
335 unsigned int start_addr; /* start address of current transfer */ 333 unsigned int start_addr; /* start address of current transfer */
336 dmadir_t direction; /* dma direction */ 334 dmadir_t direction; /* dma direction */
@@ -345,9 +343,6 @@ typedef struct acornscsi_hostdata {
345 343
346 /* card info */ 344 /* card info */
347 struct { 345 struct {
348 unsigned int io_intr; /* base address of interrupt id reg */
349 unsigned int io_page; /* base address of page reg */
350 unsigned int io_ram; /* base address of RAM access */
351 unsigned char page_reg; /* current setting of page reg */ 346 unsigned char page_reg; /* current setting of page reg */
352 } card; 347 } card;
353 348
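The header change mirrors the C file: the per-block io_port/io_intr/io_page/io_ram integers go away and AS_Host instead carries two cookies, base (the MEMC window with the WD33C93 and DMAC registers) and fast (the fast IOC window with the interrupt and page registers), so the register helpers used throughout the diff now take the host pointer. Purely as an illustration of that style — not the driver's actual definitions — an accessor pair over host->base might look like the following; only SBIC_REGIDX appears in the patch, SBIC_REGVAL and the indirect-addressing detail are assumptions of this sketch:

    #define SBIC_REGVAL 4   /* invented offset, for this sketch only */

    static inline void sbic_arm_write(AS_Host *host, unsigned int reg,
                                      unsigned int value)
    {
        writeb(reg, host->base + SBIC_REGIDX);   /* select the chip register */
        writeb(value, host->base + SBIC_REGVAL); /* then store its value */
    }

    static inline unsigned int sbic_arm_read(AS_Host *host, unsigned int reg)
    {
        writeb(reg, host->base + SBIC_REGIDX);
        return readb(host->base + SBIC_REGVAL);
    }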
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index c4b938bc30d3..aa2011b64683 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -22,6 +22,7 @@
22#include <linux/chio.h> /* here are all the ioctls */ 22#include <linux/chio.h> /* here are all the ioctls */
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/idr.h> 24#include <linux/idr.h>
25#include <linux/smp_lock.h>
25 26
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h> 28#include <scsi/scsi_cmnd.h>
@@ -571,16 +572,19 @@ ch_open(struct inode *inode, struct file *file)
571 scsi_changer *ch; 572 scsi_changer *ch;
572 int minor = iminor(inode); 573 int minor = iminor(inode);
573 574
575 lock_kernel();
574 spin_lock(&ch_index_lock); 576 spin_lock(&ch_index_lock);
575 ch = idr_find(&ch_index_idr, minor); 577 ch = idr_find(&ch_index_idr, minor);
576 578
577 if (NULL == ch || scsi_device_get(ch->device)) { 579 if (NULL == ch || scsi_device_get(ch->device)) {
578 spin_unlock(&ch_index_lock); 580 spin_unlock(&ch_index_lock);
581 unlock_kernel();
579 return -ENXIO; 582 return -ENXIO;
580 } 583 }
581 spin_unlock(&ch_index_lock); 584 spin_unlock(&ch_index_lock);
582 585
583 file->private_data = ch; 586 file->private_data = ch;
587 unlock_kernel();
584 return 0; 588 return 0;
585} 589}
586 590
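ch_open() above shows the plain form of the BKL pushdown applied across the rest of this series: since the VFS no longer takes the big kernel lock around ->open(), each driver that still relies on that serialisation takes and drops it itself, on every return path. Reduced to a placeholder driver (mydrv_dev and mydrv_lookup are hypothetical stand-ins for the driver's own state lookup):

    #include <linux/fs.h>
    #include <linux/smp_lock.h>

    struct mydrv_dev { int minor; };

    static struct mydrv_dev *mydrv_lookup(int minor)
    {
        return NULL;            /* stub for the sketch */
    }

    static int mydrv_open(struct inode *inode, struct file *file)
    {
        struct mydrv_dev *dev;

        lock_kernel();                          /* what the VFS used to do */
        dev = mydrv_lookup(iminor(inode));
        if (!dev) {
            unlock_kernel();                    /* every exit must drop it */
            return -ENXIO;
        }
        file->private_data = dev;
        unlock_kernel();
        return 0;
    }

The cost of the open-coded form is visible in the sg and st hunks further down: each early return needs its own unlock_kernel(), which is why osst opts for a wrapper instead.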
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 8508816f303d..2bc30e32b67a 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -49,6 +49,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
49#include <linux/kernel.h> /* for printk */ 49#include <linux/kernel.h> /* for printk */
50#include <linux/sched.h> 50#include <linux/sched.h>
51#include <linux/reboot.h> 51#include <linux/reboot.h>
52#include <linux/smp_lock.h>
52#include <linux/spinlock.h> 53#include <linux/spinlock.h>
53#include <linux/dma-mapping.h> 54#include <linux/dma-mapping.h>
54 55
@@ -1727,10 +1728,12 @@ static int adpt_open(struct inode *inode, struct file *file)
1727 int minor; 1728 int minor;
1728 adpt_hba* pHba; 1729 adpt_hba* pHba;
1729 1730
1731 lock_kernel();
1730 //TODO check for root access 1732 //TODO check for root access
1731 // 1733 //
1732 minor = iminor(inode); 1734 minor = iminor(inode);
1733 if (minor >= hba_count) { 1735 if (minor >= hba_count) {
1736 unlock_kernel();
1734 return -ENXIO; 1737 return -ENXIO;
1735 } 1738 }
1736 mutex_lock(&adpt_configuration_lock); 1739 mutex_lock(&adpt_configuration_lock);
@@ -1741,6 +1744,7 @@ static int adpt_open(struct inode *inode, struct file *file)
1741 } 1744 }
1742 if (pHba == NULL) { 1745 if (pHba == NULL) {
1743 mutex_unlock(&adpt_configuration_lock); 1746 mutex_unlock(&adpt_configuration_lock);
1747 unlock_kernel();
1744 return -ENXIO; 1748 return -ENXIO;
1745 } 1749 }
1746 1750
@@ -1751,6 +1755,7 @@ static int adpt_open(struct inode *inode, struct file *file)
1751 1755
1752 pHba->in_use = 1; 1756 pHba->in_use = 1;
1753 mutex_unlock(&adpt_configuration_lock); 1757 mutex_unlock(&adpt_configuration_lock);
1758 unlock_kernel();
1754 1759
1755 return 0; 1760 return 0;
1756} 1761}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 46771d4c81bd..822d5214692b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -120,6 +120,7 @@
120#include <linux/timer.h> 120#include <linux/timer.h>
121#include <linux/dma-mapping.h> 121#include <linux/dma-mapping.h>
122#include <linux/list.h> 122#include <linux/list.h>
123#include <linux/smp_lock.h>
123 124
124#ifdef GDTH_RTC 125#ifdef GDTH_RTC
125#include <linux/mc146818rtc.h> 126#include <linux/mc146818rtc.h>
@@ -4019,10 +4020,12 @@ static int gdth_open(struct inode *inode, struct file *filep)
4019{ 4020{
4020 gdth_ha_str *ha; 4021 gdth_ha_str *ha;
4021 4022
4023 lock_kernel();
4022 list_for_each_entry(ha, &gdth_instances, list) { 4024 list_for_each_entry(ha, &gdth_instances, list) {
4023 if (!ha->sdev) 4025 if (!ha->sdev)
4024 ha->sdev = scsi_get_host_dev(ha->shost); 4026 ha->sdev = scsi_get_host_dev(ha->shost);
4025 } 4027 }
4028 unlock_kernel();
4026 4029
4027 TRACE(("gdth_open()\n")); 4030 TRACE(("gdth_open()\n"));
4028 return 0; 4031 return 0;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 18551aaf5e09..28c9da7d4a5c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -46,6 +46,7 @@
46#include <linux/pci.h> 46#include <linux/pci.h>
47#include <linux/init.h> 47#include <linux/init.h>
48#include <linux/dma-mapping.h> 48#include <linux/dma-mapping.h>
49#include <linux/smp_lock.h>
49#include <scsi/scsicam.h> 50#include <scsi/scsicam.h>
50 51
51#include "scsi.h" 52#include "scsi.h"
@@ -3272,12 +3273,12 @@ mega_init_scb(adapter_t *adapter)
3272 * @filep - unused 3273 * @filep - unused
3273 * 3274 *
3274 * Routines for the character/ioctl interface to the driver. Find out if this 3275 * Routines for the character/ioctl interface to the driver. Find out if this
3275 * is a valid open. If yes, increment the module use count so that it cannot 3276 * is a valid open.
3276 * be unloaded.
3277 */ 3277 */
3278static int 3278static int
3279megadev_open (struct inode *inode, struct file *filep) 3279megadev_open (struct inode *inode, struct file *filep)
3280{ 3280{
3281 cycle_kernel_lock();
3281 /* 3282 /*
3282 * Only allow superuser to access private ioctl interface 3283 * Only allow superuser to access private ioctl interface
3283 */ 3284 */
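megadev_open() here, like mraid_mm_open() and megasas_mgmt_open() below, has no state that needs protecting, so instead of bracketing the whole body it just calls cycle_kernel_lock(), which takes and immediately releases the BKL to preserve ordering against code still running under it. In sketch form, again with a placeholder name:

    #include <linux/capability.h>
    #include <linux/fs.h>
    #include <linux/smp_lock.h>

    static int mydrv_mgmt_open(struct inode *inode, struct file *filp)
    {
        cycle_kernel_lock();            /* take and drop the BKL once */

        if (!capable(CAP_SYS_ADMIN))    /* private ioctl interface is root-only */
            return -EACCES;
        return 0;
    }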
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 0ad215e27b83..ac3b280c2a72 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -15,6 +15,7 @@
15 * Common management module 15 * Common management module
16 */ 16 */
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/smp_lock.h>
18#include "megaraid_mm.h" 19#include "megaraid_mm.h"
19 20
20 21
@@ -96,6 +97,7 @@ mraid_mm_open(struct inode *inode, struct file *filep)
96 */ 97 */
97 if (!capable(CAP_SYS_ADMIN)) return (-EACCES); 98 if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
98 99
100 cycle_kernel_lock();
99 return 0; 101 return 0;
100} 102}
101 103
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 7d84c8bbcf3f..fc7ac158476c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -33,6 +33,7 @@
33#include <linux/spinlock.h> 33#include <linux/spinlock.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/smp_lock.h>
36#include <linux/uio.h> 37#include <linux/uio.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38#include <linux/fs.h> 39#include <linux/fs.h>
@@ -2863,6 +2864,7 @@ static void megasas_shutdown(struct pci_dev *pdev)
2863 */ 2864 */
2864static int megasas_mgmt_open(struct inode *inode, struct file *filep) 2865static int megasas_mgmt_open(struct inode *inode, struct file *filep)
2865{ 2866{
2867 cycle_kernel_lock();
2866 /* 2868 /*
2867 * Allow only those users with admin rights 2869 * Allow only those users with admin rights
2868 */ 2870 */
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 243d8becd30f..1c79f9794f4e 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -50,6 +50,7 @@ static const char * osst_version = "0.99.4";
50#include <linux/moduleparam.h> 50#include <linux/moduleparam.h>
51#include <linux/delay.h> 51#include <linux/delay.h>
52#include <linux/jiffies.h> 52#include <linux/jiffies.h>
53#include <linux/smp_lock.h>
53#include <asm/uaccess.h> 54#include <asm/uaccess.h>
54#include <asm/dma.h> 55#include <asm/dma.h>
55#include <asm/system.h> 56#include <asm/system.h>
@@ -4359,7 +4360,7 @@ os_bypass:
4359 4360
4360 4361
4361/* Open the device */ 4362/* Open the device */
4362static int os_scsi_tape_open(struct inode * inode, struct file * filp) 4363static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
4363{ 4364{
4364 unsigned short flags; 4365 unsigned short flags;
4365 int i, b_size, new_session = 0, retval = 0; 4366 int i, b_size, new_session = 0, retval = 0;
@@ -4725,6 +4726,18 @@ err_out:
4725 return retval; 4726 return retval;
4726} 4727}
4727 4728
4729/* BKL pushdown: spaghetti avoidance wrapper */
4730static int os_scsi_tape_open(struct inode * inode, struct file * filp)
4731{
4732 int ret;
4733
4734 lock_kernel();
4735 ret = __os_scsi_tape_open(inode, filp);
4736 unlock_kernel();
4737 return ret;
4738}
4739
4740
4728 4741
4729/* Flush the tape buffer before close */ 4742/* Flush the tape buffer before close */
4730static int os_scsi_tape_flush(struct file * filp, fl_owner_t id) 4743static int os_scsi_tape_flush(struct file * filp, fl_owner_t id)
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index d2557dbc2dc1..0e9533f7aabc 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -21,6 +21,7 @@
21 */ 21 */
22#include <linux/miscdevice.h> 22#include <linux/miscdevice.h>
23#include <linux/file.h> 23#include <linux/file.h>
24#include <linux/smp_lock.h>
24#include <net/tcp.h> 25#include <net/tcp.h>
25#include <scsi/scsi.h> 26#include <scsi/scsi.h>
26#include <scsi/scsi_cmnd.h> 27#include <scsi/scsi_cmnd.h>
@@ -321,6 +322,7 @@ static int tgt_open(struct inode *inode, struct file *file)
321{ 322{
322 tx_ring.tr_idx = rx_ring.tr_idx = 0; 323 tx_ring.tr_idx = rx_ring.tr_idx = 0;
323 324
325 cycle_kernel_lock();
324 return 0; 326 return 0;
325} 327}
326 328
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ea0edd1b2e76..fccd2e88d600 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -49,6 +49,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
49#include <linux/delay.h> 49#include <linux/delay.h>
50#include <linux/scatterlist.h> 50#include <linux/scatterlist.h>
51#include <linux/blktrace_api.h> 51#include <linux/blktrace_api.h>
52#include <linux/smp_lock.h>
52 53
53#include "scsi.h" 54#include "scsi.h"
54#include <scsi/scsi_dbg.h> 55#include <scsi/scsi_dbg.h>
@@ -182,8 +183,9 @@ static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
182 int tablesize); 183 int tablesize);
183static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, 184static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
184 Sg_request * srp); 185 Sg_request * srp);
185static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, 186static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
186 int blocking, int read_only, Sg_request ** o_srp); 187 const char __user *buf, size_t count, int blocking,
188 int read_only, Sg_request **o_srp);
187static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 189static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
188 unsigned char *cmnd, int timeout, int blocking); 190 unsigned char *cmnd, int timeout, int blocking);
189static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 191static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
@@ -204,7 +206,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
204static Sg_request *sg_add_request(Sg_fd * sfp); 206static Sg_request *sg_add_request(Sg_fd * sfp);
205static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 207static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
206static int sg_res_in_use(Sg_fd * sfp); 208static int sg_res_in_use(Sg_fd * sfp);
207static int sg_allow_access(unsigned char opcode, char dev_type);
208static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len); 209static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
209static Sg_device *sg_get_dev(int dev); 210static Sg_device *sg_get_dev(int dev);
210#ifdef CONFIG_SCSI_PROC_FS 211#ifdef CONFIG_SCSI_PROC_FS
@@ -227,19 +228,26 @@ sg_open(struct inode *inode, struct file *filp)
227 int res; 228 int res;
228 int retval; 229 int retval;
229 230
231 lock_kernel();
230 nonseekable_open(inode, filp); 232 nonseekable_open(inode, filp);
231 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); 233 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
232 sdp = sg_get_dev(dev); 234 sdp = sg_get_dev(dev);
233 if ((!sdp) || (!sdp->device)) 235 if ((!sdp) || (!sdp->device)) {
236 unlock_kernel();
234 return -ENXIO; 237 return -ENXIO;
235 if (sdp->detached) 238 }
239 if (sdp->detached) {
240 unlock_kernel();
236 return -ENODEV; 241 return -ENODEV;
242 }
237 243
238 /* This driver's module count bumped by fops_get in <linux/fs.h> */ 244 /* This driver's module count bumped by fops_get in <linux/fs.h> */
239 /* Prevent the device driver from vanishing while we sleep */ 245 /* Prevent the device driver from vanishing while we sleep */
240 retval = scsi_device_get(sdp->device); 246 retval = scsi_device_get(sdp->device);
241 if (retval) 247 if (retval) {
248 unlock_kernel();
242 return retval; 249 return retval;
250 }
243 251
244 if (!((flags & O_NONBLOCK) || 252 if (!((flags & O_NONBLOCK) ||
245 scsi_block_when_processing_errors(sdp->device))) { 253 scsi_block_when_processing_errors(sdp->device))) {
@@ -295,10 +303,12 @@ sg_open(struct inode *inode, struct file *filp)
295 retval = -ENOMEM; 303 retval = -ENOMEM;
296 goto error_out; 304 goto error_out;
297 } 305 }
306 unlock_kernel();
298 return 0; 307 return 0;
299 308
300 error_out: 309 error_out:
301 scsi_device_put(sdp->device); 310 scsi_device_put(sdp->device);
311 unlock_kernel();
302 return retval; 312 return retval;
303} 313}
304 314
@@ -544,7 +554,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
544 return -EFAULT; 554 return -EFAULT;
545 blocking = !(filp->f_flags & O_NONBLOCK); 555 blocking = !(filp->f_flags & O_NONBLOCK);
546 if (old_hdr.reply_len < 0) 556 if (old_hdr.reply_len < 0)
547 return sg_new_write(sfp, buf, count, blocking, 0, NULL); 557 return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
548 if (count < (SZ_SG_HEADER + 6)) 558 if (count < (SZ_SG_HEADER + 6))
549 return -EIO; /* The minimum scsi command length is 6 bytes. */ 559 return -EIO; /* The minimum scsi command length is 6 bytes. */
550 560
@@ -621,8 +631,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
621} 631}
622 632
623static ssize_t 633static ssize_t
624sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, 634sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
625 int blocking, int read_only, Sg_request ** o_srp) 635 size_t count, int blocking, int read_only,
636 Sg_request **o_srp)
626{ 637{
627 int k; 638 int k;
628 Sg_request *srp; 639 Sg_request *srp;
@@ -678,8 +689,7 @@ sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
678 sg_remove_request(sfp, srp); 689 sg_remove_request(sfp, srp);
679 return -EFAULT; 690 return -EFAULT;
680 } 691 }
681 if (read_only && 692 if (read_only && !blk_verify_command(file, cmnd)) {
682 (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
683 sg_remove_request(sfp, srp); 693 sg_remove_request(sfp, srp);
684 return -EPERM; 694 return -EPERM;
685 } 695 }
@@ -799,7 +809,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
799 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) 809 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
800 return -EFAULT; 810 return -EFAULT;
801 result = 811 result =
802 sg_new_write(sfp, p, SZ_SG_IO_HDR, 812 sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
803 blocking, read_only, &srp); 813 blocking, read_only, &srp);
804 if (result < 0) 814 if (result < 0)
805 return result; 815 return result;
@@ -1048,7 +1058,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1048 1058
1049 if (copy_from_user(&opcode, siocp->data, 1)) 1059 if (copy_from_user(&opcode, siocp->data, 1))
1050 return -EFAULT; 1060 return -EFAULT;
1051 if (!sg_allow_access(opcode, sdp->device->type)) 1061 if (!blk_verify_command(filp, &opcode))
1052 return -EPERM; 1062 return -EPERM;
1053 } 1063 }
1054 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); 1064 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
@@ -2502,30 +2512,6 @@ sg_page_free(struct page *page, int size)
2502 __free_pages(page, order); 2512 __free_pages(page, order);
2503} 2513}
2504 2514
2505#ifndef MAINTENANCE_IN_CMD
2506#define MAINTENANCE_IN_CMD 0xa3
2507#endif
2508
2509static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2510 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2511 READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2512 SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2513};
2514
2515static int
2516sg_allow_access(unsigned char opcode, char dev_type)
2517{
2518 int k;
2519
2520 if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */
2521 return 1;
2522 for (k = 0; k < sizeof (allow_ops); ++k) {
2523 if (opcode == allow_ops[k])
2524 return 1;
2525 }
2526 return 0;
2527}
2528
2529#ifdef CONFIG_SCSI_PROC_FS 2515#ifdef CONFIG_SCSI_PROC_FS
2530static int 2516static int
2531sg_idr_max_id(int id, void *p, void *data) 2517sg_idr_max_id(int id, void *p, void *data)
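Beyond the BKL changes, the sg diff retires the driver's private allow_ops[] opcode table: sg_allow_access() is removed and the read-only permission check is delegated to the block layer's blk_verify_command(), which is why sg_new_write() now takes the opener's struct file *. A small illustrative wrapper around the check as this patch uses it — sg.c itself open-codes it inside sg_new_write(), and the declaration is assumed to come via linux/blkdev.h:

    #include <linux/blkdev.h>

    static int mydrv_may_issue(struct file *file, unsigned char *cmnd,
                               int read_only)
    {
        /* as called in this patch, a false return ends in -EPERM */
        if (read_only && !blk_verify_command(file, cmnd))
            return -EPERM;
        return 0;
    }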
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index c82df8bd4d89..27f5bfd1def3 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -673,24 +673,20 @@ fail:
673static void get_sectorsize(struct scsi_cd *cd) 673static void get_sectorsize(struct scsi_cd *cd)
674{ 674{
675 unsigned char cmd[10]; 675 unsigned char cmd[10];
676 unsigned char *buffer; 676 unsigned char buffer[8];
677 int the_result, retries = 3; 677 int the_result, retries = 3;
678 int sector_size; 678 int sector_size;
679 struct request_queue *queue; 679 struct request_queue *queue;
680 680
681 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
682 if (!buffer)
683 goto Enomem;
684
685 do { 681 do {
686 cmd[0] = READ_CAPACITY; 682 cmd[0] = READ_CAPACITY;
687 memset((void *) &cmd[1], 0, 9); 683 memset((void *) &cmd[1], 0, 9);
688 memset(buffer, 0, 8); 684 memset(buffer, 0, sizeof(buffer));
689 685
690 /* Do the command and wait.. */ 686 /* Do the command and wait.. */
691 the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE, 687 the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
692 buffer, 8, NULL, SR_TIMEOUT, 688 buffer, sizeof(buffer), NULL,
693 MAX_RETRIES); 689 SR_TIMEOUT, MAX_RETRIES);
694 690
695 retries--; 691 retries--;
696 692
@@ -745,14 +741,8 @@ static void get_sectorsize(struct scsi_cd *cd)
745 741
746 queue = cd->device->request_queue; 742 queue = cd->device->request_queue;
747 blk_queue_hardsect_size(queue, sector_size); 743 blk_queue_hardsect_size(queue, sector_size);
748out:
749 kfree(buffer);
750 return;
751 744
752Enomem: 745 return;
753 cd->capacity = 0x1fffff;
754 cd->device->sector_size = 2048; /* A guess, just in case */
755 goto out;
756} 746}
757 747
758static void get_capabilities(struct scsi_cd *cd) 748static void get_capabilities(struct scsi_cd *cd)
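The sr.c hunks simplify get_sectorsize(): the READ CAPACITY(10) response is only 8 bytes, so it now lives on the stack and the kmalloc()/kfree() pair plus the Enomem fallback disappear. The essence of the probe, pulled out into a standalone sketch — mydrv_read_block_length is a made-up wrapper (sr.c open-codes this with its own SR_TIMEOUT/MAX_RETRIES), and the byte layout is the standard READ CAPACITY(10) block-length field:

    #include <linux/dma-mapping.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_device.h>

    /* returns the logical block length in bytes, or a negative error */
    static int mydrv_read_block_length(struct scsi_device *sdev,
                                       int timeout, int retries)
    {
        unsigned char cmd[10] = { READ_CAPACITY, };
        unsigned char buffer[8];        /* fits comfortably on the stack */
        int res;

        res = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE,
                               buffer, sizeof(buffer), NULL,
                               timeout, retries);
        if (res)
            return -EIO;

        /* bytes 4-7 of the READ CAPACITY(10) response: block length */
        return (buffer[4] << 24) | (buffer[5] << 16) |
               (buffer[6] << 8) | buffer[7];
    }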
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 6e5a5bb31311..4684cc716aa4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -38,6 +38,7 @@ static const char *verstr = "20080224";
38#include <linux/cdev.h> 38#include <linux/cdev.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/smp_lock.h>
41 42
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
43#include <asm/dma.h> 44#include <asm/dma.h>
@@ -1113,7 +1114,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
1113} 1114}
1114 1115
1115 1116
1116 /* Open the device. Needs to be called with BKL only because of incrementing the SCSI host 1117 /* Open the device. Needs to take the BKL only because of incrementing the SCSI host
1117 module count. */ 1118 module count. */
1118static int st_open(struct inode *inode, struct file *filp) 1119static int st_open(struct inode *inode, struct file *filp)
1119{ 1120{
@@ -1123,6 +1124,7 @@ static int st_open(struct inode *inode, struct file *filp)
1123 int dev = TAPE_NR(inode); 1124 int dev = TAPE_NR(inode);
1124 char *name; 1125 char *name;
1125 1126
1127 lock_kernel();
1126 /* 1128 /*
1127 * We really want to do nonseekable_open(inode, filp); here, but some 1129 * We really want to do nonseekable_open(inode, filp); here, but some
1128 * versions of tar incorrectly call lseek on tapes and bail out if that 1130 * versions of tar incorrectly call lseek on tapes and bail out if that
@@ -1130,8 +1132,10 @@ static int st_open(struct inode *inode, struct file *filp)
1130 */ 1132 */
1131 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE); 1133 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
1132 1134
1133 if (!(STp = scsi_tape_get(dev))) 1135 if (!(STp = scsi_tape_get(dev))) {
1136 unlock_kernel();
1134 return -ENXIO; 1137 return -ENXIO;
1138 }
1135 1139
1136 write_lock(&st_dev_arr_lock); 1140 write_lock(&st_dev_arr_lock);
1137 filp->private_data = STp; 1141 filp->private_data = STp;
@@ -1140,6 +1144,7 @@ static int st_open(struct inode *inode, struct file *filp)
1140 if (STp->in_use) { 1144 if (STp->in_use) {
1141 write_unlock(&st_dev_arr_lock); 1145 write_unlock(&st_dev_arr_lock);
1142 scsi_tape_put(STp); 1146 scsi_tape_put(STp);
1147 unlock_kernel();
1143 DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); ) 1148 DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
1144 return (-EBUSY); 1149 return (-EBUSY);
1145 } 1150 }
@@ -1188,12 +1193,14 @@ static int st_open(struct inode *inode, struct file *filp)
1188 retval = (-EIO); 1193 retval = (-EIO);
1189 goto err_out; 1194 goto err_out;
1190 } 1195 }
1196 unlock_kernel();
1191 return 0; 1197 return 0;
1192 1198
1193 err_out: 1199 err_out:
1194 normalize_buffer(STp->buffer); 1200 normalize_buffer(STp->buffer);
1195 STp->in_use = 0; 1201 STp->in_use = 0;
1196 scsi_tape_put(STp); 1202 scsi_tape_put(STp);
1203 unlock_kernel();
1197 return retval; 1204 return retval;
1198 1205
1199} 1206}
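The st.c hunks above are part of the BKL pushdown: open() now takes and releases the big kernel lock itself instead of relying on the VFS to hold it across the call. A minimal sketch of the pattern, using a hypothetical foo_open()/foo_lookup() rather than the tape driver's own names, to show that every exit path must drop the lock exactly once:

/* Sketch of the lock_kernel()/unlock_kernel() pushdown pattern;
 * foo_lookup() is a hypothetical per-minor lookup, stubbed out here. */
#include <linux/fs.h>
#include <linux/smp_lock.h>

static void *foo_lookup(unsigned int minor)
{
	return NULL;			/* stub for the sketch */
}

static int foo_open(struct inode *inode, struct file *filp)
{
	void *dev;

	lock_kernel();
	dev = foo_lookup(iminor(inode));
	if (!dev) {
		unlock_kernel();	/* error path drops the BKL too */
		return -ENXIO;
	}
	filp->private_data = dev;
	unlock_kernel();		/* success path drops it as well */
	return 0;
}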
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 9bc42763623c..18ca9075e131 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -448,22 +448,27 @@ config SERIAL_CLPS711X_CONSOLE
448 your boot loader (lilo or loadlin) about how to pass options to the 448 your boot loader (lilo or loadlin) about how to pass options to the
449 kernel at boot time.) 449 kernel at boot time.)
450 450
451config SERIAL_S3C2410 451config SERIAL_SAMSUNG
452 tristate "Samsung S3C2410/S3C2440/S3C2442/S3C2412 Serial port support" 452 tristate "Samsung SoC serial support"
453 depends on ARM && ARCH_S3C2410 453 depends on ARM && PLAT_S3C24XX
454 select SERIAL_CORE
455 help 454 help
456 Support for the on-chip UARTs on the Samsung S3C24XX series CPUs, 455 Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
457 providing /dev/ttySAC0, 1 and 2 (note, some machines may not 456 providing /dev/ttySAC0, 1 and 2 (note, some machines may not
458 provide all of these ports, depending on how the serial port 457 provide all of these ports, depending on how the serial port
459 pins are configured.) 458 pins are configured.)

460 459
461 Currently this driver supports the UARTS on the S3C2410, S3C2440, 460config SERIAL_SAMSUNG_DEBUG
462 S3C2442, S3C2412 and S3C2413 CPUs. 461 bool "Samsung SoC serial debug"
462 depends on SERIAL_SAMSUNG
463 help
464 Add support for debugging the serial driver. Since this is
465 generally being used as a console, we use our own output
466 routines that go via the low-level debug printascii()
467 function.
463 468
464config SERIAL_S3C2410_CONSOLE 469config SERIAL_SAMSUNG_CONSOLE
465 bool "Support for console on S3C2410 serial port" 470 bool "Support for console on Samsung SoC serial port"
466 depends on SERIAL_S3C2410=y 471 depends on SERIAL_SAMSUNG=y
467 select SERIAL_CORE_CONSOLE 472 select SERIAL_CORE_CONSOLE
468 help 473 help
469 Allow selection of the S3C24XX on-board serial ports for use as 474 Allow selection of the S3C24XX on-board serial ports for use as
@@ -476,6 +481,37 @@ config SERIAL_S3C2410_CONSOLE
476 your boot loader about how to pass options to the kernel at 481 your boot loader about how to pass options to the kernel at
477 boot time.) 482 boot time.)
478 483
484config SERIAL_S3C2400
485 tristate "Samsung S3C2400 Serial port support"
486 depends on ARM && SERIAL_SAMSUNG && CPU_S3C2400
487 default y if CPU_S3C2400
488 help
489 Serial port support for the Samsung S3C2400 SoC
490
491config SERIAL_S3C2410
492 tristate "Samsung S3C2410 Serial port support"
493 depends on SERIAL_SAMSUNG && CPU_S3C2410
494 default y if CPU_S3C2410
495 help
496 Serial port support for the Samsung S3C2410 SoC
497
498config SERIAL_S3C2412
499 tristate "Samsung S3C2412/S3C2413 Serial port support"
500 depends on SERIAL_SAMSUNG && CPU_S3C2412
501 default y if CPU_S3C2412
502 help
503 Serial port support for the Samsung S3C2412 and S3C2413 SoCs
504
505config SERIAL_S3C2440
506 tristate "Samsung S3C2440/S3C2442 Serial port support"
507 depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442)
508 default y if CPU_S3C2440
509 default y if CPU_S3C2442
510 help
511 Serial port support for the Samsung S3C2440 and S3C2442 SoCs
512
513
514
479config SERIAL_DZ 515config SERIAL_DZ
480 bool "DECstation DZ serial driver" 516 bool "DECstation DZ serial driver"
481 depends on MACH_DECSTATION && 32BIT 517 depends on MACH_DECSTATION && 32BIT
@@ -753,7 +789,7 @@ config BFIN_UART3_CTSRTS
753 789
754config SERIAL_IMX 790config SERIAL_IMX
755 bool "IMX serial port support" 791 bool "IMX serial port support"
756 depends on ARM && ARCH_IMX 792 depends on ARM && (ARCH_IMX || ARCH_MXC)
757 select SERIAL_CORE 793 select SERIAL_CORE
758 help 794 help
759 If you have a machine based on a Motorola IMX CPU you 795 If you have a machine based on a Motorola IMX CPU you
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 0d9c09b1e836..7d85c1fbe7e0 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -28,7 +28,11 @@ obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
28obj-$(CONFIG_SERIAL_SA1100) += sa1100.o 28obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
29obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o 29obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
30obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o 30obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
31obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
32obj-$(CONFIG_SERIAL_S3C2400) += s3c2400.o
31obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o 33obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
34obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
35obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
32obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o 36obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o
33obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o 37obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o
34obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o 38obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 42be8b01a40f..6aeef22bd203 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1439,14 +1439,29 @@ static struct uart_driver atmel_uart = {
1439}; 1439};
1440 1440
1441#ifdef CONFIG_PM 1441#ifdef CONFIG_PM
1442static bool atmel_serial_clk_will_stop(void)
1443{
1444#ifdef CONFIG_ARCH_AT91
1445 return at91_suspend_entering_slow_clock();
1446#else
1447 return false;
1448#endif
1449}
1450
1442static int atmel_serial_suspend(struct platform_device *pdev, 1451static int atmel_serial_suspend(struct platform_device *pdev,
1443 pm_message_t state) 1452 pm_message_t state)
1444{ 1453{
1445 struct uart_port *port = platform_get_drvdata(pdev); 1454 struct uart_port *port = platform_get_drvdata(pdev);
1446 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1455 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1447 1456
1457 if (atmel_is_console_port(port) && console_suspend_enabled) {
1458 /* Drain the TX shifter */
1459 while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
1460 cpu_relax();
1461 }
1462
1448 if (device_may_wakeup(&pdev->dev) 1463 if (device_may_wakeup(&pdev->dev)
1449 && !at91_suspend_entering_slow_clock()) 1464 && !atmel_serial_clk_will_stop())
1450 enable_irq_wake(port->irq); 1465 enable_irq_wake(port->irq);
1451 else { 1466 else {
1452 uart_suspend_port(&atmel_uart, port); 1467 uart_suspend_port(&atmel_uart, port);
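The suspend hook above now busy-waits on ATMEL_US_TXEMPTY so the console's final characters leave the transmit shifter before the clocks are cut. A hedged sketch of that drain loop in isolation; the bounded retry count is an added safety assumption, the driver itself waits unconditionally:

/* Sketch only, reusing the driver's UART_GET_CSR()/ATMEL_US_TXEMPTY names
 * (defined in the Atmel serial headers); the spin limit is an assumption
 * and is not part of atmel_serial.c. */
#include <linux/serial_core.h>

static void drain_tx_shifter(struct uart_port *port)
{
	unsigned int spins = 100000;

	while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY) && --spins)
		cpu_relax();
}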
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 5a375bf0ebf4..64acb39a51ba 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -40,6 +40,7 @@
40#include <linux/tty_flip.h> 40#include <linux/tty_flip.h>
41#include <linux/serial_core.h> 41#include <linux/serial_core.h>
42#include <linux/serial.h> 42#include <linux/serial.h>
43#include <linux/clk.h>
43 44
44#include <asm/io.h> 45#include <asm/io.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
@@ -61,6 +62,11 @@
61#define UBIR 0xa4 /* BRM Incremental Register */ 62#define UBIR 0xa4 /* BRM Incremental Register */
62#define UBMR 0xa8 /* BRM Modulator Register */ 63#define UBMR 0xa8 /* BRM Modulator Register */
63#define UBRC 0xac /* Baud Rate Count Register */ 64#define UBRC 0xac /* Baud Rate Count Register */
65#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2
66#define ONEMS 0xb0 /* One Millisecond register */
67#define UTS 0xb4 /* UART Test Register */
68#endif
69#ifdef CONFIG_ARCH_IMX
64#define BIPR1 0xb0 /* Incremental Preset Register 1 */ 70#define BIPR1 0xb0 /* Incremental Preset Register 1 */
65#define BIPR2 0xb4 /* Incremental Preset Register 2 */ 71#define BIPR2 0xb4 /* Incremental Preset Register 2 */
66#define BIPR3 0xb8 /* Incremental Preset Register 3 */ 72#define BIPR3 0xb8 /* Incremental Preset Register 3 */
@@ -70,6 +76,7 @@
70#define BMPR3 0xc8 /* BRM Modulator Register 3 */ 76#define BMPR3 0xc8 /* BRM Modulator Register 3 */
71#define BMPR4 0xcc /* BRM Modulator Register 4 */ 77#define BMPR4 0xcc /* BRM Modulator Register 4 */
72#define UTS 0xd0 /* UART Test Register */ 78#define UTS 0xd0 /* UART Test Register */
79#endif
73 80
74/* UART Control Register Bit Fields.*/ 81/* UART Control Register Bit Fields.*/
75#define URXD_CHARRDY (1<<15) 82#define URXD_CHARRDY (1<<15)
@@ -89,7 +96,12 @@
89#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */ 96#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
90#define UCR1_SNDBRK (1<<4) /* Send break */ 97#define UCR1_SNDBRK (1<<4) /* Send break */
91#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */ 98#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
99#ifdef CONFIG_ARCH_IMX
92#define UCR1_UARTCLKEN (1<<2) /* UART clock enabled */ 100#define UCR1_UARTCLKEN (1<<2) /* UART clock enabled */
101#endif
102#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2
103#define UCR1_UARTCLKEN (0) /* not present on mx2/mx3 */
104#endif
93#define UCR1_DOZE (1<<1) /* Doze */ 105#define UCR1_DOZE (1<<1) /* Doze */
94#define UCR1_UARTEN (1<<0) /* UART enabled */ 106#define UCR1_UARTEN (1<<0) /* UART enabled */
95#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */ 107#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
@@ -163,8 +175,19 @@
163#define UTS_SOFTRST (1<<0) /* Software reset */ 175#define UTS_SOFTRST (1<<0) /* Software reset */
164 176
165/* We've been assigned a range on the "Low-density serial ports" major */ 177/* We've been assigned a range on the "Low-density serial ports" major */
178#ifdef CONFIG_ARCH_IMX
166#define SERIAL_IMX_MAJOR 204 179#define SERIAL_IMX_MAJOR 204
167#define MINOR_START 41 180#define MINOR_START 41
181#define DEV_NAME "ttySMX"
182#define MAX_INTERNAL_IRQ IMX_IRQS
183#endif
184
185#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2
186#define SERIAL_IMX_MAJOR 207
187#define MINOR_START 16
188#define DEV_NAME "ttymxc"
189#define MAX_INTERNAL_IRQ MXC_MAX_INT_LINES
190#endif
168 191
169/* 192/*
170 * This determines how often we check the modem status signals 193 * This determines how often we check the modem status signals
@@ -176,12 +199,15 @@
176 199
177#define DRIVER_NAME "IMX-uart" 200#define DRIVER_NAME "IMX-uart"
178 201
202#define UART_NR 8
203
179struct imx_port { 204struct imx_port {
180 struct uart_port port; 205 struct uart_port port;
181 struct timer_list timer; 206 struct timer_list timer;
182 unsigned int old_status; 207 unsigned int old_status;
183 int txirq,rxirq,rtsirq; 208 int txirq,rxirq,rtsirq;
184 int have_rtscts:1; 209 int have_rtscts:1;
210 struct clk *clk;
185}; 211};
186 212
187/* 213/*
@@ -405,6 +431,26 @@ out:
405 return IRQ_HANDLED; 431 return IRQ_HANDLED;
406} 432}
407 433
434static irqreturn_t imx_int(int irq, void *dev_id)
435{
436 struct imx_port *sport = dev_id;
437 unsigned int sts;
438
439 sts = readl(sport->port.membase + USR1);
440
441 if (sts & USR1_RRDY)
442 imx_rxint(irq, dev_id);
443
444 if (sts & USR1_TRDY &&
445 readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
446 imx_txint(irq, dev_id);
447
448 if (sts & USR1_RTSS)
449 imx_rtsint(irq, dev_id);
450
451 return IRQ_HANDLED;
452}
453
408/* 454/*
409 * Return TIOCSER_TEMT when transmitter is not busy. 455 * Return TIOCSER_TEMT when transmitter is not busy.
410 */ 456 */
@@ -477,7 +523,8 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
477 * RFDIV is set such way to satisfy requested uartclk value 523 * RFDIV is set such way to satisfy requested uartclk value
478 */ 524 */
479 val = TXTL << 10 | RXTL; 525 val = TXTL << 10 | RXTL;
480 ufcr_rfdiv = (imx_get_perclk1() + sport->port.uartclk / 2) / sport->port.uartclk; 526 ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
527 / sport->port.uartclk;
481 528
482 if(!ufcr_rfdiv) 529 if(!ufcr_rfdiv)
483 ufcr_rfdiv = 1; 530 ufcr_rfdiv = 1;
@@ -509,21 +556,34 @@ static int imx_startup(struct uart_port *port)
509 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); 556 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
510 557
511 /* 558 /*
512 * Allocate the IRQ 559 * Allocate the IRQ(s). i.MX1 has three interrupts whereas later
560 * chips only have one interrupt.
513 */ 561 */
514 retval = request_irq(sport->rxirq, imx_rxint, 0, 562 if (sport->txirq > 0) {
515 DRIVER_NAME, sport); 563 retval = request_irq(sport->rxirq, imx_rxint, 0,
516 if (retval) goto error_out1; 564 DRIVER_NAME, sport);
517 565 if (retval)
518 retval = request_irq(sport->txirq, imx_txint, 0, 566 goto error_out1;
519 DRIVER_NAME, sport); 567
520 if (retval) goto error_out2; 568 retval = request_irq(sport->txirq, imx_txint, 0,
521 569 DRIVER_NAME, sport);
522 retval = request_irq(sport->rtsirq, imx_rtsint, 570 if (retval)
523 (sport->rtsirq < IMX_IRQS) ? 0 : 571 goto error_out2;
572
573 retval = request_irq(sport->rtsirq, imx_rtsint,
574 (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
524 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 575 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
525 DRIVER_NAME, sport); 576 DRIVER_NAME, sport);
526 if (retval) goto error_out3; 577 if (retval)
578 goto error_out3;
579 } else {
580 retval = request_irq(sport->port.irq, imx_int, 0,
581 DRIVER_NAME, sport);
582 if (retval) {
583 free_irq(sport->port.irq, sport);
584 goto error_out1;
585 }
586 }
527 587
528 /* 588 /*
529 * Finally, clear and enable interrupts 589 * Finally, clear and enable interrupts
@@ -548,9 +608,11 @@ static int imx_startup(struct uart_port *port)
548 return 0; 608 return 0;
549 609
550error_out3: 610error_out3:
551 free_irq(sport->txirq, sport); 611 if (sport->txirq)
612 free_irq(sport->txirq, sport);
552error_out2: 613error_out2:
553 free_irq(sport->rxirq, sport); 614 if (sport->rxirq)
615 free_irq(sport->rxirq, sport);
554error_out1: 616error_out1:
555 return retval; 617 return retval;
556} 618}
@@ -568,9 +630,12 @@ static void imx_shutdown(struct uart_port *port)
568 /* 630 /*
569 * Free the interrupts 631 * Free the interrupts
570 */ 632 */
571 free_irq(sport->rtsirq, sport); 633 if (sport->txirq > 0) {
572 free_irq(sport->txirq, sport); 634 free_irq(sport->rtsirq, sport);
573 free_irq(sport->rxirq, sport); 635 free_irq(sport->txirq, sport);
636 free_irq(sport->rxirq, sport);
637 } else
638 free_irq(sport->port.irq, sport);
574 639
575 /* 640 /*
576 * Disable all interrupts, port and break condition. 641 * Disable all interrupts, port and break condition.
@@ -589,6 +654,7 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
589 unsigned long flags; 654 unsigned long flags;
590 unsigned int ucr2, old_ucr1, old_txrxen, baud, quot; 655 unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
591 unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; 656 unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
657 unsigned int div, num, denom, ufcr;
592 658
593 /* 659 /*
594 * If we don't support modem control lines, don't allow 660 * If we don't support modem control lines, don't allow
@@ -634,7 +700,7 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
634 /* 700 /*
635 * Ask the core to calculate the divisor for us. 701 * Ask the core to calculate the divisor for us.
636 */ 702 */
637 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); 703 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
638 quot = uart_get_divisor(port, baud); 704 quot = uart_get_divisor(port, baud);
639 705
640 spin_lock_irqsave(&sport->port.lock, flags); 706 spin_lock_irqsave(&sport->port.lock, flags);
@@ -684,14 +750,41 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
684 sport->port.membase + UCR2); 750 sport->port.membase + UCR2);
685 old_txrxen &= (UCR2_TXEN | UCR2_RXEN); 751 old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
686 752
687 /* set the baud rate. We assume uartclk = 16 MHz 753 div = sport->port.uartclk / (baud * 16);
688 * 754 if (div > 7)
689 * baud * 16 UBIR - 1 755 div = 7;
690 * --------- = -------- 756 if (!div)
691 * uartclk UBMR - 1 757 div = 1;
692 */ 758
693 writel((baud / 100) - 1, sport->port.membase + UBIR); 759 num = baud;
694 writel(10000 - 1, sport->port.membase + UBMR); 760 denom = port->uartclk / div / 16;
761
762 /* shift num and denom right until they fit into 16 bits */
763 while (num > 0x10000 || denom > 0x10000) {
764 num >>= 1;
765 denom >>= 1;
766 }
767 if (num > 0)
768 num -= 1;
769 if (denom > 0)
770 denom -= 1;
771
772 writel(num, sport->port.membase + UBIR);
773 writel(denom, sport->port.membase + UBMR);
774
775 if (div == 7)
776 div = 6; /* 6 in RFDIV means divide by 7 */
777 else
778 div = 6 - div;
779
780 ufcr = readl(sport->port.membase + UFCR);
781 ufcr = (ufcr & (~UFCR_RFDIV)) |
782 (div << 7);
783 writel(ufcr, sport->port.membase + UFCR);
784
785#ifdef ONEMS
786 writel(sport->port.uartclk / div / 1000, sport->port.membase + ONEMS);
787#endif
695 788
696 writel(old_ucr1, sport->port.membase + UCR1); 789 writel(old_ucr1, sport->port.membase + UCR1);
697 790
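The baud-rate hunk above replaces the hard-coded 16 MHz assumption with an UBIR/UBMR pair derived from the real reference clock: baud/16 = (uartclk/RFDIV) * (UBIR+1)/(UBMR+1), with RFDIV clamped to 1..7 and both register values shifted to fit 16 bits. A standalone user-space sketch of the same arithmetic, with a hypothetical 66.5 MHz clock, can be used to sanity-check a uartclk/baud combination:

/* Standalone sketch of the UBIR/UBMR calculation the new set_termios
 * code performs; the clock and baud values below are example inputs. */
#include <stdio.h>

int main(void)
{
	unsigned int uartclk = 66500000;	/* example reference clock */
	unsigned int baud = 115200;
	unsigned int div, num, denom;

	div = uartclk / (baud * 16);
	if (div > 7)
		div = 7;
	if (!div)
		div = 1;

	num = baud;
	denom = uartclk / div / 16;

	/* shift both right until they fit the 16-bit UBIR/UBMR registers */
	while (num > 0x10000 || denom > 0x10000) {
		num >>= 1;
		denom >>= 1;
	}
	if (num > 0)
		num -= 1;
	if (denom > 0)
		denom -= 1;

	printf("UBIR=%u UBMR=%u RFDIV=/%u actual baud ~ %u\n",
	       num, denom, div,
	       (unsigned int)((unsigned long long)(uartclk / div / 16)
			      * (num + 1) / (denom + 1)));
	return 0;
}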
@@ -801,65 +894,7 @@ static struct uart_ops imx_pops = {
801 .verify_port = imx_verify_port, 894 .verify_port = imx_verify_port,
802}; 895};
803 896
804static struct imx_port imx_ports[] = { 897static struct imx_port *imx_ports[UART_NR];
805 {
806 .txirq = UART1_MINT_TX,
807 .rxirq = UART1_MINT_RX,
808 .rtsirq = UART1_MINT_RTS,
809 .port = {
810 .type = PORT_IMX,
811 .iotype = UPIO_MEM,
812 .membase = (void *)IMX_UART1_BASE,
813 .mapbase = 0x00206000,
814 .irq = UART1_MINT_RX,
815 .uartclk = 16000000,
816 .fifosize = 32,
817 .flags = UPF_BOOT_AUTOCONF,
818 .ops = &imx_pops,
819 .line = 0,
820 },
821 }, {
822 .txirq = UART2_MINT_TX,
823 .rxirq = UART2_MINT_RX,
824 .rtsirq = UART2_MINT_RTS,
825 .port = {
826 .type = PORT_IMX,
827 .iotype = UPIO_MEM,
828 .membase = (void *)IMX_UART2_BASE,
829 .mapbase = 0x00207000,
830 .irq = UART2_MINT_RX,
831 .uartclk = 16000000,
832 .fifosize = 32,
833 .flags = UPF_BOOT_AUTOCONF,
834 .ops = &imx_pops,
835 .line = 1,
836 },
837 }
838};
839
840/*
841 * Setup the IMX serial ports.
842 * Note also that we support "console=ttySMXx" where "x" is either 0 or 1.
843 * Which serial port this ends up being depends on the machine you're
844 * running this kernel on. I'm not convinced that this is a good idea,
845 * but that's the way it traditionally works.
846 *
847 */
848static void __init imx_init_ports(void)
849{
850 static int first = 1;
851 int i;
852
853 if (!first)
854 return;
855 first = 0;
856
857 for (i = 0; i < ARRAY_SIZE(imx_ports); i++) {
858 init_timer(&imx_ports[i].timer);
859 imx_ports[i].timer.function = imx_timeout;
860 imx_ports[i].timer.data = (unsigned long)&imx_ports[i];
861 }
862}
863 898
864#ifdef CONFIG_SERIAL_IMX_CONSOLE 899#ifdef CONFIG_SERIAL_IMX_CONSOLE
865static void imx_console_putchar(struct uart_port *port, int ch) 900static void imx_console_putchar(struct uart_port *port, int ch)
@@ -878,7 +913,7 @@ static void imx_console_putchar(struct uart_port *port, int ch)
878static void 913static void
879imx_console_write(struct console *co, const char *s, unsigned int count) 914imx_console_write(struct console *co, const char *s, unsigned int count)
880{ 915{
881 struct imx_port *sport = &imx_ports[co->index]; 916 struct imx_port *sport = imx_ports[co->index];
882 unsigned int old_ucr1, old_ucr2; 917 unsigned int old_ucr1, old_ucr2;
883 918
884 /* 919 /*
@@ -944,7 +979,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
944 else 979 else
945 ucfr_rfdiv = 6 - ucfr_rfdiv; 980 ucfr_rfdiv = 6 - ucfr_rfdiv;
946 981
947 uartclk = imx_get_perclk1(); 982 uartclk = clk_get_rate(sport->clk);
948 uartclk /= ucfr_rfdiv; 983 uartclk /= ucfr_rfdiv;
949 984
950 { /* 985 { /*
@@ -984,7 +1019,7 @@ imx_console_setup(struct console *co, char *options)
984 */ 1019 */
985 if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) 1020 if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
986 co->index = 0; 1021 co->index = 0;
987 sport = &imx_ports[co->index]; 1022 sport = imx_ports[co->index];
988 1023
989 if (options) 1024 if (options)
990 uart_parse_options(options, &baud, &parity, &bits, &flow); 1025 uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -998,7 +1033,7 @@ imx_console_setup(struct console *co, char *options)
998 1033
999static struct uart_driver imx_reg; 1034static struct uart_driver imx_reg;
1000static struct console imx_console = { 1035static struct console imx_console = {
1001 .name = "ttySMX", 1036 .name = DEV_NAME,
1002 .write = imx_console_write, 1037 .write = imx_console_write,
1003 .device = uart_console_device, 1038 .device = uart_console_device,
1004 .setup = imx_console_setup, 1039 .setup = imx_console_setup,
@@ -1007,14 +1042,6 @@ static struct console imx_console = {
1007 .data = &imx_reg, 1042 .data = &imx_reg,
1008}; 1043};
1009 1044
1010static int __init imx_rs_console_init(void)
1011{
1012 imx_init_ports();
1013 register_console(&imx_console);
1014 return 0;
1015}
1016console_initcall(imx_rs_console_init);
1017
1018#define IMX_CONSOLE &imx_console 1045#define IMX_CONSOLE &imx_console
1019#else 1046#else
1020#define IMX_CONSOLE NULL 1047#define IMX_CONSOLE NULL
@@ -1023,7 +1050,7 @@ console_initcall(imx_rs_console_init);
1023static struct uart_driver imx_reg = { 1050static struct uart_driver imx_reg = {
1024 .owner = THIS_MODULE, 1051 .owner = THIS_MODULE,
1025 .driver_name = DRIVER_NAME, 1052 .driver_name = DRIVER_NAME,
1026 .dev_name = "ttySMX", 1053 .dev_name = DEV_NAME,
1027 .major = SERIAL_IMX_MAJOR, 1054 .major = SERIAL_IMX_MAJOR,
1028 .minor = MINOR_START, 1055 .minor = MINOR_START,
1029 .nr = ARRAY_SIZE(imx_ports), 1056 .nr = ARRAY_SIZE(imx_ports),
@@ -1050,29 +1077,98 @@ static int serial_imx_resume(struct platform_device *dev)
1050 return 0; 1077 return 0;
1051} 1078}
1052 1079
1053static int serial_imx_probe(struct platform_device *dev) 1080static int serial_imx_probe(struct platform_device *pdev)
1054{ 1081{
1082 struct imx_port *sport;
1055 struct imxuart_platform_data *pdata; 1083 struct imxuart_platform_data *pdata;
1084 void __iomem *base;
1085 int ret = 0;
1086 struct resource *res;
1087
1088 sport = kzalloc(sizeof(*sport), GFP_KERNEL);
1089 if (!sport)
1090 return -ENOMEM;
1056 1091
1057 imx_ports[dev->id].port.dev = &dev->dev; 1092 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1093 if (!res) {
1094 ret = -ENODEV;
1095 goto free;
1096 }
1097
1098 base = ioremap(res->start, PAGE_SIZE);
1099 if (!base) {
1100 ret = -ENOMEM;
1101 goto free;
1102 }
1103
1104 sport->port.dev = &pdev->dev;
1105 sport->port.mapbase = res->start;
1106 sport->port.membase = base;
1107 sport->port.type = PORT_IMX,
1108 sport->port.iotype = UPIO_MEM;
1109 sport->port.irq = platform_get_irq(pdev, 0);
1110 sport->rxirq = platform_get_irq(pdev, 0);
1111 sport->txirq = platform_get_irq(pdev, 1);
1112 sport->rtsirq = platform_get_irq(pdev, 2);
1113 sport->port.fifosize = 32;
1114 sport->port.ops = &imx_pops;
1115 sport->port.flags = UPF_BOOT_AUTOCONF;
1116 sport->port.line = pdev->id;
1117 init_timer(&sport->timer);
1118 sport->timer.function = imx_timeout;
1119 sport->timer.data = (unsigned long)sport;
1120
1121 sport->clk = clk_get(&pdev->dev, "uart_clk");
1122 if (IS_ERR(sport->clk)) {
1123 ret = PTR_ERR(sport->clk);
1124 goto unmap;
1125 }
1126 clk_enable(sport->clk);
1058 1127
1059 pdata = (struct imxuart_platform_data *)dev->dev.platform_data; 1128 sport->port.uartclk = clk_get_rate(sport->clk);
1129
1130 imx_ports[pdev->id] = sport;
1131
1132 pdata = pdev->dev.platform_data;
1060 if(pdata && (pdata->flags & IMXUART_HAVE_RTSCTS)) 1133 if(pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
1061 imx_ports[dev->id].have_rtscts = 1; 1134 sport->have_rtscts = 1;
1135
1136 if (pdata->init)
1137 pdata->init(pdev);
1138
1139 uart_add_one_port(&imx_reg, &sport->port);
1140 platform_set_drvdata(pdev, &sport->port);
1062 1141
1063 uart_add_one_port(&imx_reg, &imx_ports[dev->id].port);
1064 platform_set_drvdata(dev, &imx_ports[dev->id]);
1065 return 0; 1142 return 0;
1143unmap:
1144 iounmap(sport->port.membase);
1145free:
1146 kfree(sport);
1147
1148 return ret;
1066} 1149}
1067 1150
1068static int serial_imx_remove(struct platform_device *dev) 1151static int serial_imx_remove(struct platform_device *pdev)
1069{ 1152{
1070 struct imx_port *sport = platform_get_drvdata(dev); 1153 struct imxuart_platform_data *pdata;
1154 struct imx_port *sport = platform_get_drvdata(pdev);
1071 1155
1072 platform_set_drvdata(dev, NULL); 1156 pdata = pdev->dev.platform_data;
1073 1157
1074 if (sport) 1158 platform_set_drvdata(pdev, NULL);
1159
1160 if (sport) {
1075 uart_remove_one_port(&imx_reg, &sport->port); 1161 uart_remove_one_port(&imx_reg, &sport->port);
1162 clk_put(sport->clk);
1163 }
1164
1165 clk_disable(sport->clk);
1166
1167 if (pdata->exit)
1168 pdata->exit(pdev);
1169
1170 iounmap(sport->port.membase);
1171 kfree(sport);
1076 1172
1077 return 0; 1173 return 0;
1078} 1174}
@@ -1095,8 +1191,6 @@ static int __init imx_serial_init(void)
1095 1191
1096 printk(KERN_INFO "Serial: IMX driver\n"); 1192 printk(KERN_INFO "Serial: IMX driver\n");
1097 1193
1098 imx_init_ports();
1099
1100 ret = uart_register_driver(&imx_reg); 1194 ret = uart_register_driver(&imx_reg);
1101 if (ret) 1195 if (ret)
1102 return ret; 1196 return ret;
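Much of the imx.c rework above replaces the old imx_get_perclk1() call with the generic clk API ("uart_clk" is the clock name the rewritten probe looks up). A minimal sketch of that get/enable/rate/disable/put sequence, kept separate from the driver's actual probe and remove paths:

/* Sketch of the clk API sequence the rewritten probe()/remove() rely on;
 * error handling trimmed to the essentials. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_get_uart_clock(struct platform_device *pdev,
				  unsigned long *rate)
{
	struct clk *clk = clk_get(&pdev->dev, "uart_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);		/* gate the UART clock on */
	*rate = clk_get_rate(clk);	/* feeds port->uartclk */

	/* teardown order used on remove: disable, then release the handle */
	clk_disable(clk);
	clk_put(clk);
	return 0;
}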
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c
new file mode 100644
index 000000000000..a1102053e553
--- /dev/null
+++ b/drivers/serial/s3c2400.c
@@ -0,0 +1,106 @@
1/* linux/drivers/serial/s3c2400.c
2 *
3 * Driver for Samsung SoC onboard UARTs.
4 *
5 * Ben Dooks, Copyright (c) 2003-2005 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/module.h>
14#include <linux/ioport.h>
15#include <linux/io.h>
16#include <linux/platform_device.h>
17
18#include <asm/irq.h>
19
20#include <asm/hardware.h>
21
22#include <asm/plat-s3c/regs-serial.h>
23#include <asm/arch/regs-gpio.h>
24
25#include "samsung.h"
26
27static int s3c2400_serial_getsource(struct uart_port *port,
28 struct s3c24xx_uart_clksrc *clk)
29{
30 clk->divisor = 1;
31 clk->name = "pclk";
32
33 return 0;
34}
35
36static int s3c2400_serial_setsource(struct uart_port *port,
37 struct s3c24xx_uart_clksrc *clk)
38{
39 return 0;
40}
41
42static int s3c2400_serial_resetport(struct uart_port *port,
43 struct s3c2410_uartcfg *cfg)
44{
45 dbg("s3c2400_serial_resetport: port=%p (%08lx), cfg=%p\n",
46 port, port->mapbase, cfg);
47
48 wr_regl(port, S3C2410_UCON, cfg->ucon);
49 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
50
51 /* reset both fifos */
52
53 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
54 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
55
56 return 0;
57}
58
59static struct s3c24xx_uart_info s3c2400_uart_inf = {
60 .name = "Samsung S3C2400 UART",
61 .type = PORT_S3C2400,
62 .fifosize = 16,
63 .rx_fifomask = S3C2410_UFSTAT_RXMASK,
64 .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
65 .rx_fifofull = S3C2410_UFSTAT_RXFULL,
66 .tx_fifofull = S3C2410_UFSTAT_TXFULL,
67 .tx_fifomask = S3C2410_UFSTAT_TXMASK,
68 .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
69 .get_clksrc = s3c2400_serial_getsource,
70 .set_clksrc = s3c2400_serial_setsource,
71 .reset_port = s3c2400_serial_resetport,
72};
73
74static int s3c2400_serial_probe(struct platform_device *dev)
75{
76 return s3c24xx_serial_probe(dev, &s3c2400_uart_inf);
77}
78
79static struct platform_driver s3c2400_serial_drv = {
80 .probe = s3c2400_serial_probe,
81 .remove = s3c24xx_serial_remove,
82 .driver = {
83 .name = "s3c2400-uart",
84 .owner = THIS_MODULE,
85 },
86};
87
88s3c24xx_console_init(&s3c2400_serial_drv, &s3c2400_uart_inf);
89
90static inline int s3c2400_serial_init(void)
91{
92 return s3c24xx_serial_init(&s3c2400_serial_drv, &s3c2400_uart_inf);
93}
94
95static inline void s3c2400_serial_exit(void)
96{
97 platform_driver_unregister(&s3c2400_serial_drv);
98}
99
100module_init(s3c2400_serial_init);
101module_exit(s3c2400_serial_exit);
102
103MODULE_LICENSE("GPL v2");
104MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
105MODULE_DESCRIPTION("Samsung S3C2400 SoC Serial port driver");
106MODULE_ALIAS("platform:s3c2400-uart");
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 2b6a013639e6..c5f03f41686f 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -1,1270 +1,30 @@
1/* 1/* linux/drivers/serial/s3c2410.c
2 * linux/drivers/serial/s3c2410.c
3 * 2 *
4 * Driver for onboard UARTs on the Samsung S3C24XX 3 * Driver for Samsung S3C2410 SoC onboard UARTs.
5 * 4 *
6 * Based on drivers/char/serial.c and drivers/char/21285.c 5 * Ben Dooks, Copyright (c) 2003-2005,2008 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 * 7 *
8 * Ben Dooks, (c) 2003-2005 Simtec Electronics 8 * This program is free software; you can redistribute it and/or modify
9 * http://www.simtec.co.uk/products/SWLINUX/ 9 * it under the terms of the GNU General Public License version 2 as
10 * 10 * published by the Free Software Foundation.
11 * Changelog:
12 *
13 * 22-Jul-2004 BJD Finished off device rewrite
14 *
15 * 21-Jul-2004 BJD Thanks to <herbet@13thfloor.at> for pointing out
16 * problems with baud rate and loss of IR settings. Update
17 * to add configuration via platform_device structure
18 *
19 * 28-Sep-2004 BJD Re-write for the following items
20 * - S3C2410 and S3C2440 serial support
21 * - Power Management support
22 * - Fix console via IrDA devices
23 * - SysReq (Herbert Pötzl)
24 * - Break character handling (Herbert Pötzl)
25 * - spin-lock initialisation (Dimitry Andric)
26 * - added clock control
27 * - updated init code to use platform_device info
28 *
29 * 06-Mar-2005 BJD Add s3c2440 fclk clock source
30 *
31 * 09-Mar-2005 BJD Add s3c2400 support
32 *
33 * 10-Mar-2005 LCVR Changed S3C2410_VA_UART to S3C24XX_VA_UART
34*/
35
36/* Note on 2440 fclk clock source handling
37 *
38 * Whilst it is possible to use the fclk as clock source, the method
39 * of properly switching too/from this is currently un-implemented, so
40 * whichever way is configured at startup is the one that will be used.
41*/
42
43/* Note on 2410 error handling
44 *
45 * The s3c2410 manual has a love/hate affair with the contents of the
46 * UERSTAT register in the UART blocks, and keeps marking some of the
47 * error bits as reserved. Having checked with the s3c2410x01,
48 * it copes with BREAKs properly, so I am happy to ignore the RESERVED
49 * feature from the latter versions of the manual.
50 *
51 * If it becomes apparent that later versions of the 2410 remove these
52 * bits, then action will have to be taken to differentiate the versions
53 * and change the policy on BREAK
54 *
55 * BJD, 04-Nov-2004
56*/ 11*/
57 12
58
59#if defined(CONFIG_SERIAL_S3C2410_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
60#define SUPPORT_SYSRQ
61#endif
62
63#include <linux/module.h> 13#include <linux/module.h>
64#include <linux/ioport.h> 14#include <linux/ioport.h>
15#include <linux/io.h>
65#include <linux/platform_device.h> 16#include <linux/platform_device.h>
66#include <linux/init.h> 17#include <linux/init.h>
67#include <linux/sysrq.h>
68#include <linux/console.h>
69#include <linux/tty.h>
70#include <linux/tty_flip.h>
71#include <linux/serial_core.h> 18#include <linux/serial_core.h>
72#include <linux/serial.h> 19#include <linux/serial.h>
73#include <linux/delay.h>
74#include <linux/clk.h>
75 20
76#include <asm/io.h>
77#include <asm/irq.h> 21#include <asm/irq.h>
78
79#include <asm/hardware.h> 22#include <asm/hardware.h>
80 23
81#include <asm/plat-s3c/regs-serial.h> 24#include <asm/plat-s3c/regs-serial.h>
82#include <asm/arch/regs-gpio.h> 25#include <asm/arch/regs-gpio.h>
83 26
84/* structures */ 27#include "samsung.h"
85
86struct s3c24xx_uart_info {
87 char *name;
88 unsigned int type;
89 unsigned int fifosize;
90 unsigned long rx_fifomask;
91 unsigned long rx_fifoshift;
92 unsigned long rx_fifofull;
93 unsigned long tx_fifomask;
94 unsigned long tx_fifoshift;
95 unsigned long tx_fifofull;
96
97 /* clock source control */
98
99 int (*get_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
100 int (*set_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
101
102 /* uart controls */
103 int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
104};
105
106struct s3c24xx_uart_port {
107 unsigned char rx_claimed;
108 unsigned char tx_claimed;
109
110 struct s3c24xx_uart_info *info;
111 struct s3c24xx_uart_clksrc *clksrc;
112 struct clk *clk;
113 struct clk *baudclk;
114 struct uart_port port;
115};
116
117
118/* configuration defines */
119
120#if 0
121#if 1
122/* send debug to the low-level output routines */
123
124extern void printascii(const char *);
125
126static void
127s3c24xx_serial_dbg(const char *fmt, ...)
128{
129 va_list va;
130 char buff[256];
131
132 va_start(va, fmt);
133 vsprintf(buff, fmt, va);
134 va_end(va);
135
136 printascii(buff);
137}
138
139#define dbg(x...) s3c24xx_serial_dbg(x)
140
141#else
142#define dbg(x...) printk(KERN_DEBUG "s3c24xx: ");
143#endif
144#else /* no debug */
145#define dbg(x...) do {} while(0)
146#endif
147
148/* UART name and device definitions */
149
150#define S3C24XX_SERIAL_NAME "ttySAC"
151#define S3C24XX_SERIAL_MAJOR 204
152#define S3C24XX_SERIAL_MINOR 64
153
154
155/* conversion functions */
156
157#define s3c24xx_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
158#define s3c24xx_dev_to_cfg(__dev) (struct s3c2410_uartcfg *)((__dev)->platform_data)
159
160/* we can support 3 uarts, but not always use them */
161
162#ifdef CONFIG_CPU_S3C2400
163#define NR_PORTS (2)
164#else
165#define NR_PORTS (3)
166#endif
167
168/* port irq numbers */
169
170#define TX_IRQ(port) ((port)->irq + 1)
171#define RX_IRQ(port) ((port)->irq)
172
173/* register access controls */
174
175#define portaddr(port, reg) ((port)->membase + (reg))
176
177#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
178#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
179
180#define wr_regb(port, reg, val) \
181 do { __raw_writeb(val, portaddr(port, reg)); } while(0)
182
183#define wr_regl(port, reg, val) \
184 do { __raw_writel(val, portaddr(port, reg)); } while(0)
185
186/* macros to change one thing to another */
187
188#define tx_enabled(port) ((port)->unused[0])
189#define rx_enabled(port) ((port)->unused[1])
190
191/* flag to ignore all characters coming in */
192#define RXSTAT_DUMMY_READ (0x10000000)
193
194static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port)
195{
196 return container_of(port, struct s3c24xx_uart_port, port);
197}
198
199/* translate a port to the device name */
200
201static inline const char *s3c24xx_serial_portname(struct uart_port *port)
202{
203 return to_platform_device(port->dev)->name;
204}
205
206static int s3c24xx_serial_txempty_nofifo(struct uart_port *port)
207{
208 return (rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE);
209}
210
211static void s3c24xx_serial_rx_enable(struct uart_port *port)
212{
213 unsigned long flags;
214 unsigned int ucon, ufcon;
215 int count = 10000;
216
217 spin_lock_irqsave(&port->lock, flags);
218
219 while (--count && !s3c24xx_serial_txempty_nofifo(port))
220 udelay(100);
221
222 ufcon = rd_regl(port, S3C2410_UFCON);
223 ufcon |= S3C2410_UFCON_RESETRX;
224 wr_regl(port, S3C2410_UFCON, ufcon);
225
226 ucon = rd_regl(port, S3C2410_UCON);
227 ucon |= S3C2410_UCON_RXIRQMODE;
228 wr_regl(port, S3C2410_UCON, ucon);
229
230 rx_enabled(port) = 1;
231 spin_unlock_irqrestore(&port->lock, flags);
232}
233
234static void s3c24xx_serial_rx_disable(struct uart_port *port)
235{
236 unsigned long flags;
237 unsigned int ucon;
238
239 spin_lock_irqsave(&port->lock, flags);
240
241 ucon = rd_regl(port, S3C2410_UCON);
242 ucon &= ~S3C2410_UCON_RXIRQMODE;
243 wr_regl(port, S3C2410_UCON, ucon);
244
245 rx_enabled(port) = 0;
246 spin_unlock_irqrestore(&port->lock, flags);
247}
248
249static void s3c24xx_serial_stop_tx(struct uart_port *port)
250{
251 if (tx_enabled(port)) {
252 disable_irq(TX_IRQ(port));
253 tx_enabled(port) = 0;
254 if (port->flags & UPF_CONS_FLOW)
255 s3c24xx_serial_rx_enable(port);
256 }
257}
258
259static void s3c24xx_serial_start_tx(struct uart_port *port)
260{
261 if (!tx_enabled(port)) {
262 if (port->flags & UPF_CONS_FLOW)
263 s3c24xx_serial_rx_disable(port);
264
265 enable_irq(TX_IRQ(port));
266 tx_enabled(port) = 1;
267 }
268}
269
270
271static void s3c24xx_serial_stop_rx(struct uart_port *port)
272{
273 if (rx_enabled(port)) {
274 dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
275 disable_irq(RX_IRQ(port));
276 rx_enabled(port) = 0;
277 }
278}
279
280static void s3c24xx_serial_enable_ms(struct uart_port *port)
281{
282}
283
284static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *port)
285{
286 return to_ourport(port)->info;
287}
288
289static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port)
290{
291 if (port->dev == NULL)
292 return NULL;
293
294 return (struct s3c2410_uartcfg *)port->dev->platform_data;
295}
296
297static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
298 unsigned long ufstat)
299{
300 struct s3c24xx_uart_info *info = ourport->info;
301
302 if (ufstat & info->rx_fifofull)
303 return info->fifosize;
304
305 return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
306}
307
308
309/* ? - where has parity gone?? */
310#define S3C2410_UERSTAT_PARITY (0x1000)
311
312static irqreturn_t
313s3c24xx_serial_rx_chars(int irq, void *dev_id)
314{
315 struct s3c24xx_uart_port *ourport = dev_id;
316 struct uart_port *port = &ourport->port;
317 struct tty_struct *tty = port->info->tty;
318 unsigned int ufcon, ch, flag, ufstat, uerstat;
319 int max_count = 64;
320
321 while (max_count-- > 0) {
322 ufcon = rd_regl(port, S3C2410_UFCON);
323 ufstat = rd_regl(port, S3C2410_UFSTAT);
324
325 if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
326 break;
327
328 uerstat = rd_regl(port, S3C2410_UERSTAT);
329 ch = rd_regb(port, S3C2410_URXH);
330
331 if (port->flags & UPF_CONS_FLOW) {
332 int txe = s3c24xx_serial_txempty_nofifo(port);
333
334 if (rx_enabled(port)) {
335 if (!txe) {
336 rx_enabled(port) = 0;
337 continue;
338 }
339 } else {
340 if (txe) {
341 ufcon |= S3C2410_UFCON_RESETRX;
342 wr_regl(port, S3C2410_UFCON, ufcon);
343 rx_enabled(port) = 1;
344 goto out;
345 }
346 continue;
347 }
348 }
349
350 /* insert the character into the buffer */
351
352 flag = TTY_NORMAL;
353 port->icount.rx++;
354
355 if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
356 dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n",
357 ch, uerstat);
358
359 /* check for break */
360 if (uerstat & S3C2410_UERSTAT_BREAK) {
361 dbg("break!\n");
362 port->icount.brk++;
363 if (uart_handle_break(port))
364 goto ignore_char;
365 }
366
367 if (uerstat & S3C2410_UERSTAT_FRAME)
368 port->icount.frame++;
369 if (uerstat & S3C2410_UERSTAT_OVERRUN)
370 port->icount.overrun++;
371
372 uerstat &= port->read_status_mask;
373
374 if (uerstat & S3C2410_UERSTAT_BREAK)
375 flag = TTY_BREAK;
376 else if (uerstat & S3C2410_UERSTAT_PARITY)
377 flag = TTY_PARITY;
378 else if (uerstat & ( S3C2410_UERSTAT_FRAME | S3C2410_UERSTAT_OVERRUN))
379 flag = TTY_FRAME;
380 }
381
382 if (uart_handle_sysrq_char(port, ch))
383 goto ignore_char;
384
385 uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN, ch, flag);
386
387 ignore_char:
388 continue;
389 }
390 tty_flip_buffer_push(tty);
391
392 out:
393 return IRQ_HANDLED;
394}
395
396static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
397{
398 struct s3c24xx_uart_port *ourport = id;
399 struct uart_port *port = &ourport->port;
400 struct circ_buf *xmit = &port->info->xmit;
401 int count = 256;
402
403 if (port->x_char) {
404 wr_regb(port, S3C2410_UTXH, port->x_char);
405 port->icount.tx++;
406 port->x_char = 0;
407 goto out;
408 }
409
410 /* if there isn't anything more to transmit, or the uart is now
411 * stopped, disable the uart and exit
412 */
413
414 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
415 s3c24xx_serial_stop_tx(port);
416 goto out;
417 }
418
419 /* try and drain the buffer... */
420
421 while (!uart_circ_empty(xmit) && count-- > 0) {
422 if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
423 break;
424
425 wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
426 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
427 port->icount.tx++;
428 }
429
430 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
431 uart_write_wakeup(port);
432
433 if (uart_circ_empty(xmit))
434 s3c24xx_serial_stop_tx(port);
435
436 out:
437 return IRQ_HANDLED;
438}
439
440static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
441{
442 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
443 unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT);
444 unsigned long ufcon = rd_regl(port, S3C2410_UFCON);
445
446 if (ufcon & S3C2410_UFCON_FIFOMODE) {
447 if ((ufstat & info->tx_fifomask) != 0 ||
448 (ufstat & info->tx_fifofull))
449 return 0;
450
451 return 1;
452 }
453
454 return s3c24xx_serial_txempty_nofifo(port);
455}
456
457/* no modem control lines */
458static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
459{
460 unsigned int umstat = rd_regb(port,S3C2410_UMSTAT);
461
462 if (umstat & S3C2410_UMSTAT_CTS)
463 return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
464 else
465 return TIOCM_CAR | TIOCM_DSR;
466}
467
468static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
469{
470 /* todo - possibly remove AFC and do manual CTS */
471}
472
473static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
474{
475 unsigned long flags;
476 unsigned int ucon;
477
478 spin_lock_irqsave(&port->lock, flags);
479
480 ucon = rd_regl(port, S3C2410_UCON);
481
482 if (break_state)
483 ucon |= S3C2410_UCON_SBREAK;
484 else
485 ucon &= ~S3C2410_UCON_SBREAK;
486
487 wr_regl(port, S3C2410_UCON, ucon);
488
489 spin_unlock_irqrestore(&port->lock, flags);
490}
491
492static void s3c24xx_serial_shutdown(struct uart_port *port)
493{
494 struct s3c24xx_uart_port *ourport = to_ourport(port);
495
496 if (ourport->tx_claimed) {
497 free_irq(TX_IRQ(port), ourport);
498 tx_enabled(port) = 0;
499 ourport->tx_claimed = 0;
500 }
501
502 if (ourport->rx_claimed) {
503 free_irq(RX_IRQ(port), ourport);
504 ourport->rx_claimed = 0;
505 rx_enabled(port) = 0;
506 }
507}
508
509
510static int s3c24xx_serial_startup(struct uart_port *port)
511{
512 struct s3c24xx_uart_port *ourport = to_ourport(port);
513 int ret;
514
515 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
516 port->mapbase, port->membase);
517
518 rx_enabled(port) = 1;
519
520 ret = request_irq(RX_IRQ(port),
521 s3c24xx_serial_rx_chars, 0,
522 s3c24xx_serial_portname(port), ourport);
523
524 if (ret != 0) {
525 printk(KERN_ERR "cannot get irq %d\n", RX_IRQ(port));
526 return ret;
527 }
528
529 ourport->rx_claimed = 1;
530
531 dbg("requesting tx irq...\n");
532
533 tx_enabled(port) = 1;
534
535 ret = request_irq(TX_IRQ(port),
536 s3c24xx_serial_tx_chars, 0,
537 s3c24xx_serial_portname(port), ourport);
538
539 if (ret) {
540 printk(KERN_ERR "cannot get irq %d\n", TX_IRQ(port));
541 goto err;
542 }
543
544 ourport->tx_claimed = 1;
545
546 dbg("s3c24xx_serial_startup ok\n");
547
548 /* the port reset code should have done the correct
549 * register setup for the port controls */
550
551 return ret;
552
553 err:
554 s3c24xx_serial_shutdown(port);
555 return ret;
556}
557
558/* power management control */
559
560static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
561 unsigned int old)
562{
563 struct s3c24xx_uart_port *ourport = to_ourport(port);
564
565 switch (level) {
566 case 3:
567 if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
568 clk_disable(ourport->baudclk);
569
570 clk_disable(ourport->clk);
571 break;
572
573 case 0:
574 clk_enable(ourport->clk);
575
576 if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
577 clk_enable(ourport->baudclk);
578
579 break;
580 default:
581 printk(KERN_ERR "s3c24xx_serial: unknown pm %d\n", level);
582 }
583}
584
585/* baud rate calculation
586 *
587 * The UARTs on the S3C2410/S3C2440 can take their clocks from a number
588 * of different sources, including the peripheral clock ("pclk") and an
589 * external clock ("uclk"). The S3C2440 also adds the core clock ("fclk")
590 * with a programmable extra divisor.
591 *
592 * The following code goes through the clock sources, and calculates the
593 * baud clocks (and the resultant actual baud rates) and then tries to
594 * pick the closest one and select that.
595 *
596*/
597
598
599#define MAX_CLKS (8)
600
601static struct s3c24xx_uart_clksrc tmp_clksrc = {
602 .name = "pclk",
603 .min_baud = 0,
604 .max_baud = 0,
605 .divisor = 1,
606};
607
608static inline int
609s3c24xx_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
610{
611 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
612
613 return (info->get_clksrc)(port, c);
614}
615
616static inline int
617s3c24xx_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
618{
619 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
620
621 return (info->set_clksrc)(port, c);
622}
623
624struct baud_calc {
625 struct s3c24xx_uart_clksrc *clksrc;
626 unsigned int calc;
627 unsigned int quot;
628 struct clk *src;
629};
630
631static int s3c24xx_serial_calcbaud(struct baud_calc *calc,
632 struct uart_port *port,
633 struct s3c24xx_uart_clksrc *clksrc,
634 unsigned int baud)
635{
636 unsigned long rate;
637
638 calc->src = clk_get(port->dev, clksrc->name);
639 if (calc->src == NULL || IS_ERR(calc->src))
640 return 0;
641
642 rate = clk_get_rate(calc->src);
643 rate /= clksrc->divisor;
644
645 calc->clksrc = clksrc;
646 calc->quot = (rate + (8 * baud)) / (16 * baud);
647 calc->calc = (rate / (calc->quot * 16));
648
649 calc->quot--;
650 return 1;
651}
652
653static unsigned int s3c24xx_serial_getclk(struct uart_port *port,
654 struct s3c24xx_uart_clksrc **clksrc,
655 struct clk **clk,
656 unsigned int baud)
657{
658 struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
659 struct s3c24xx_uart_clksrc *clkp;
660 struct baud_calc res[MAX_CLKS];
661 struct baud_calc *resptr, *best, *sptr;
662 int i;
663
664 clkp = cfg->clocks;
665 best = NULL;
666
667 if (cfg->clocks_size < 2) {
668 if (cfg->clocks_size == 0)
669 clkp = &tmp_clksrc;
670
671 /* check to see if we're sourcing fclk, and if so we're
672 * going to have to update the clock source
673 */
674
675 if (strcmp(clkp->name, "fclk") == 0) {
676 struct s3c24xx_uart_clksrc src;
677
678 s3c24xx_serial_getsource(port, &src);
679
680 * check that the port is already using fclk, and if
681 * not, then re-select fclk
682 */
683
684 if (strcmp(src.name, clkp->name) == 0) {
685 s3c24xx_serial_setsource(port, clkp);
686 s3c24xx_serial_getsource(port, &src);
687 }
688
689 clkp->divisor = src.divisor;
690 }
691
692 s3c24xx_serial_calcbaud(res, port, clkp, baud);
693 best = res;
694 resptr = best + 1;
695 } else {
696 resptr = res;
697
698 for (i = 0; i < cfg->clocks_size; i++, clkp++) {
699 if (s3c24xx_serial_calcbaud(resptr, port, clkp, baud))
700 resptr++;
701 }
702 }
703
704 /* ok, we now need to select the best clock we found */
705
706 if (!best) {
707 unsigned int deviation = (1<<30)|((1<<30)-1);
708 int calc_deviation;
709
710 for (sptr = res; sptr < resptr; sptr++) {
711 printk(KERN_DEBUG
712 "found clk %p (%s) quot %d, calc %d\n",
713 sptr->clksrc, sptr->clksrc->name,
714 sptr->quot, sptr->calc);
715
716 calc_deviation = baud - sptr->calc;
717 if (calc_deviation < 0)
718 calc_deviation = -calc_deviation;
719
720 if (calc_deviation < deviation) {
721 best = sptr;
722 deviation = calc_deviation;
723 }
724 }
725
726 printk(KERN_DEBUG "best %p (deviation %d)\n", best, deviation);
727 }
728
729 printk(KERN_DEBUG "selected clock %p (%s) quot %d, calc %d\n",
730 best->clksrc, best->clksrc->name, best->quot, best->calc);
731
732 /* store results to pass back */
733
734 *clksrc = best->clksrc;
735 *clk = best->src;
736
737 return best->quot;
738}
739
740static void s3c24xx_serial_set_termios(struct uart_port *port,
741 struct ktermios *termios,
742 struct ktermios *old)
743{
744 struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
745 struct s3c24xx_uart_port *ourport = to_ourport(port);
746 struct s3c24xx_uart_clksrc *clksrc = NULL;
747 struct clk *clk = NULL;
748 unsigned long flags;
749 unsigned int baud, quot;
750 unsigned int ulcon;
751 unsigned int umcon;
752
753 /*
754 * We don't support modem control lines.
755 */
756 termios->c_cflag &= ~(HUPCL | CMSPAR);
757 termios->c_cflag |= CLOCAL;
758
759 /*
760 * Ask the core to calculate the divisor for us.
761 */
762
763 baud = uart_get_baud_rate(port, termios, old, 0, 115200*8);
764
765 if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
766 quot = port->custom_divisor;
767 else
768 quot = s3c24xx_serial_getclk(port, &clksrc, &clk, baud);
769
770 /* check to see if we need to change clock source */
771
772 if (ourport->clksrc != clksrc || ourport->baudclk != clk) {
773 s3c24xx_serial_setsource(port, clksrc);
774
775 if (ourport->baudclk != NULL && !IS_ERR(ourport->baudclk)) {
776 clk_disable(ourport->baudclk);
777 ourport->baudclk = NULL;
778 }
779
780 clk_enable(clk);
781
782 ourport->clksrc = clksrc;
783 ourport->baudclk = clk;
784 }
785
786 switch (termios->c_cflag & CSIZE) {
787 case CS5:
788 dbg("config: 5bits/char\n");
789 ulcon = S3C2410_LCON_CS5;
790 break;
791 case CS6:
792 dbg("config: 6bits/char\n");
793 ulcon = S3C2410_LCON_CS6;
794 break;
795 case CS7:
796 dbg("config: 7bits/char\n");
797 ulcon = S3C2410_LCON_CS7;
798 break;
799 case CS8:
800 default:
801 dbg("config: 8bits/char\n");
802 ulcon = S3C2410_LCON_CS8;
803 break;
804 }
805
806 /* preserve original lcon IR settings */
807 ulcon |= (cfg->ulcon & S3C2410_LCON_IRM);
808
809 if (termios->c_cflag & CSTOPB)
810 ulcon |= S3C2410_LCON_STOPB;
811
812 umcon = (termios->c_cflag & CRTSCTS) ? S3C2410_UMCOM_AFC : 0;
813
814 if (termios->c_cflag & PARENB) {
815 if (termios->c_cflag & PARODD)
816 ulcon |= S3C2410_LCON_PODD;
817 else
818 ulcon |= S3C2410_LCON_PEVEN;
819 } else {
820 ulcon |= S3C2410_LCON_PNONE;
821 }
822
823 spin_lock_irqsave(&port->lock, flags);
824
825 dbg("setting ulcon to %08x, brddiv to %d\n", ulcon, quot);
826
827 wr_regl(port, S3C2410_ULCON, ulcon);
828 wr_regl(port, S3C2410_UBRDIV, quot);
829 wr_regl(port, S3C2410_UMCON, umcon);
830
831 dbg("uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
832 rd_regl(port, S3C2410_ULCON),
833 rd_regl(port, S3C2410_UCON),
834 rd_regl(port, S3C2410_UFCON));
835
836 /*
837 * Update the per-port timeout.
838 */
839 uart_update_timeout(port, termios->c_cflag, baud);
840
841 /*
842 * Which character status flags are we interested in?
843 */
844 port->read_status_mask = S3C2410_UERSTAT_OVERRUN;
845 if (termios->c_iflag & INPCK)
846 port->read_status_mask |= S3C2410_UERSTAT_FRAME | S3C2410_UERSTAT_PARITY;
847
848 /*
849 * Which character status flags should we ignore?
850 */
851 port->ignore_status_mask = 0;
852 if (termios->c_iflag & IGNPAR)
853 port->ignore_status_mask |= S3C2410_UERSTAT_OVERRUN;
854 if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR)
855 port->ignore_status_mask |= S3C2410_UERSTAT_FRAME;
856
857 /*
858 * Ignore all characters if CREAD is not set.
859 */
860 if ((termios->c_cflag & CREAD) == 0)
861 port->ignore_status_mask |= RXSTAT_DUMMY_READ;
862
863 spin_unlock_irqrestore(&port->lock, flags);
864}
865
866static const char *s3c24xx_serial_type(struct uart_port *port)
867{
868 switch (port->type) {
869 case PORT_S3C2410:
870 return "S3C2410";
871 case PORT_S3C2440:
872 return "S3C2440";
873 case PORT_S3C2412:
874 return "S3C2412";
875 default:
876 return NULL;
877 }
878}
879
880#define MAP_SIZE (0x100)
881
882static void s3c24xx_serial_release_port(struct uart_port *port)
883{
884 release_mem_region(port->mapbase, MAP_SIZE);
885}
886
887static int s3c24xx_serial_request_port(struct uart_port *port)
888{
889 const char *name = s3c24xx_serial_portname(port);
890 return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
891}
892
893static void s3c24xx_serial_config_port(struct uart_port *port, int flags)
894{
895 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
896
897 if (flags & UART_CONFIG_TYPE &&
898 s3c24xx_serial_request_port(port) == 0)
899 port->type = info->type;
900}
901
902/*
903 * verify the new serial_struct (for TIOCSSERIAL).
904 */
905static int
906s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
907{
908 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
909
910 if (ser->type != PORT_UNKNOWN && ser->type != info->type)
911 return -EINVAL;
912
913 return 0;
914}
915
916
917#ifdef CONFIG_SERIAL_S3C2410_CONSOLE
918
919static struct console s3c24xx_serial_console;
920
921#define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console
922#else
923#define S3C24XX_SERIAL_CONSOLE NULL
924#endif
925
926static struct uart_ops s3c24xx_serial_ops = {
927 .pm = s3c24xx_serial_pm,
928 .tx_empty = s3c24xx_serial_tx_empty,
929 .get_mctrl = s3c24xx_serial_get_mctrl,
930 .set_mctrl = s3c24xx_serial_set_mctrl,
931 .stop_tx = s3c24xx_serial_stop_tx,
932 .start_tx = s3c24xx_serial_start_tx,
933 .stop_rx = s3c24xx_serial_stop_rx,
934 .enable_ms = s3c24xx_serial_enable_ms,
935 .break_ctl = s3c24xx_serial_break_ctl,
936 .startup = s3c24xx_serial_startup,
937 .shutdown = s3c24xx_serial_shutdown,
938 .set_termios = s3c24xx_serial_set_termios,
939 .type = s3c24xx_serial_type,
940 .release_port = s3c24xx_serial_release_port,
941 .request_port = s3c24xx_serial_request_port,
942 .config_port = s3c24xx_serial_config_port,
943 .verify_port = s3c24xx_serial_verify_port,
944};
945
946
947static struct uart_driver s3c24xx_uart_drv = {
948 .owner = THIS_MODULE,
949 .dev_name = "s3c2410_serial",
950 .nr = 3,
951 .cons = S3C24XX_SERIAL_CONSOLE,
952 .driver_name = S3C24XX_SERIAL_NAME,
953 .major = S3C24XX_SERIAL_MAJOR,
954 .minor = S3C24XX_SERIAL_MINOR,
955};
956
957static struct s3c24xx_uart_port s3c24xx_serial_ports[NR_PORTS] = {
958 [0] = {
959 .port = {
960 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock),
961 .iotype = UPIO_MEM,
962 .irq = IRQ_S3CUART_RX0,
963 .uartclk = 0,
964 .fifosize = 16,
965 .ops = &s3c24xx_serial_ops,
966 .flags = UPF_BOOT_AUTOCONF,
967 .line = 0,
968 }
969 },
970 [1] = {
971 .port = {
972 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[1].port.lock),
973 .iotype = UPIO_MEM,
974 .irq = IRQ_S3CUART_RX1,
975 .uartclk = 0,
976 .fifosize = 16,
977 .ops = &s3c24xx_serial_ops,
978 .flags = UPF_BOOT_AUTOCONF,
979 .line = 1,
980 }
981 },
982#if NR_PORTS > 2
983
984 [2] = {
985 .port = {
986 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[2].port.lock),
987 .iotype = UPIO_MEM,
988 .irq = IRQ_S3CUART_RX2,
989 .uartclk = 0,
990 .fifosize = 16,
991 .ops = &s3c24xx_serial_ops,
992 .flags = UPF_BOOT_AUTOCONF,
993 .line = 2,
994 }
995 }
996#endif
997};
998
999/* s3c24xx_serial_resetport
1000 *
1001 * wrapper to call the specific reset for this port (reset the fifos
1002 * and the settings)
1003*/
1004
1005static inline int s3c24xx_serial_resetport(struct uart_port * port,
1006 struct s3c2410_uartcfg *cfg)
1007{
1008 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
1009
1010 return (info->reset_port)(port, cfg);
1011}
1012
1013/* s3c24xx_serial_init_port
1014 *
1015 * initialise a single serial port from the platform device given
1016 */
1017
1018static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
1019 struct s3c24xx_uart_info *info,
1020 struct platform_device *platdev)
1021{
1022 struct uart_port *port = &ourport->port;
1023 struct s3c2410_uartcfg *cfg;
1024 struct resource *res;
1025 int ret;
1026
1027 dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
1028
1029 if (platdev == NULL)
1030 return -ENODEV;
1031
1032 cfg = s3c24xx_dev_to_cfg(&platdev->dev);
1033
1034 if (port->mapbase != 0)
1035 return 0;
1036
1037 if (cfg->hwport > 3)
1038 return -EINVAL;
1039
1040 /* setup info for port */
1041 port->dev = &platdev->dev;
1042 ourport->info = info;
1043
1044 /* copy the info in from provided structure */
1045 ourport->port.fifosize = info->fifosize;
1046
1047 dbg("s3c24xx_serial_init_port: %p (hw %d)...\n", port, cfg->hwport);
1048
1049 port->uartclk = 1;
1050
1051 if (cfg->uart_flags & UPF_CONS_FLOW) {
1052 dbg("s3c24xx_serial_init_port: enabling flow control\n");
1053 port->flags |= UPF_CONS_FLOW;
1054 }
1055
1056 /* sort our the physical and virtual addresses for each UART */
1057
1058 res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
1059 if (res == NULL) {
1060 printk(KERN_ERR "failed to find memory resource for uart\n");
1061 return -EINVAL;
1062 }
1063
1064 dbg("resource %p (%lx..%lx)\n", res, res->start, res->end);
1065
1066 port->mapbase = res->start;
1067 port->membase = S3C24XX_VA_UART + (res->start - S3C24XX_PA_UART);
1068 ret = platform_get_irq(platdev, 0);
1069 if (ret < 0)
1070 port->irq = 0;
1071 else
1072 port->irq = ret;
1073
1074 ourport->clk = clk_get(&platdev->dev, "uart");
1075
1076 dbg("port: map=%08x, mem=%08x, irq=%d, clock=%ld\n",
1077 port->mapbase, port->membase, port->irq, port->uartclk);
1078
1079 /* reset the fifos (and setup the uart) */
1080 s3c24xx_serial_resetport(port, cfg);
1081 return 0;
1082}
1083
1084/* Device driver serial port probe */
1085
1086static int probe_index = 0;
1087
1088static int s3c24xx_serial_probe(struct platform_device *dev,
1089 struct s3c24xx_uart_info *info)
1090{
1091 struct s3c24xx_uart_port *ourport;
1092 int ret;
1093
1094 dbg("s3c24xx_serial_probe(%p, %p) %d\n", dev, info, probe_index);
1095
1096 ourport = &s3c24xx_serial_ports[probe_index];
1097 probe_index++;
1098
1099 dbg("%s: initialising port %p...\n", __func__, ourport);
1100
1101 ret = s3c24xx_serial_init_port(ourport, info, dev);
1102 if (ret < 0)
1103 goto probe_err;
1104
1105 dbg("%s: adding port\n", __func__);
1106 uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
1107 platform_set_drvdata(dev, &ourport->port);
1108
1109 return 0;
1110
1111 probe_err:
1112 return ret;
1113}
1114
1115static int s3c24xx_serial_remove(struct platform_device *dev)
1116{
1117 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
1118
1119 if (port)
1120 uart_remove_one_port(&s3c24xx_uart_drv, port);
1121
1122 return 0;
1123}
1124
1125/* UART power management code */
1126
1127#ifdef CONFIG_PM
1128
1129static int s3c24xx_serial_suspend(struct platform_device *dev, pm_message_t state)
1130{
1131 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
1132
1133 if (port)
1134 uart_suspend_port(&s3c24xx_uart_drv, port);
1135
1136 return 0;
1137}
1138
1139static int s3c24xx_serial_resume(struct platform_device *dev)
1140{
1141 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
1142 struct s3c24xx_uart_port *ourport = to_ourport(port);
1143
1144 if (port) {
1145 clk_enable(ourport->clk);
1146 s3c24xx_serial_resetport(port, s3c24xx_port_to_cfg(port));
1147 clk_disable(ourport->clk);
1148
1149 uart_resume_port(&s3c24xx_uart_drv, port);
1150 }
1151
1152 return 0;
1153}
1154
1155#else
1156#define s3c24xx_serial_suspend NULL
1157#define s3c24xx_serial_resume NULL
1158#endif
1159
1160static int s3c24xx_serial_init(struct platform_driver *drv,
1161 struct s3c24xx_uart_info *info)
1162{
1163 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
1164 return platform_driver_register(drv);
1165}
1166
1167
1168/* now comes the code to initialise either the s3c2410 or s3c2440 serial
1169 * port information
1170*/
1171
1172/* cpu specific variations on the serial port support */
1173
1174#ifdef CONFIG_CPU_S3C2400
1175
1176static int s3c2400_serial_getsource(struct uart_port *port,
1177 struct s3c24xx_uart_clksrc *clk)
1178{
1179 clk->divisor = 1;
1180 clk->name = "pclk";
1181
1182 return 0;
1183}
1184
1185static int s3c2400_serial_setsource(struct uart_port *port,
1186 struct s3c24xx_uart_clksrc *clk)
1187{
1188 return 0;
1189}
1190
1191static int s3c2400_serial_resetport(struct uart_port *port,
1192 struct s3c2410_uartcfg *cfg)
1193{
1194 dbg("s3c2400_serial_resetport: port=%p (%08lx), cfg=%p\n",
1195 port, port->mapbase, cfg);
1196
1197 wr_regl(port, S3C2410_UCON, cfg->ucon);
1198 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
1199
1200 /* reset both fifos */
1201
1202 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
1203 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
1204
1205 return 0;
1206}
1207
1208static struct s3c24xx_uart_info s3c2400_uart_inf = {
1209 .name = "Samsung S3C2400 UART",
1210 .type = PORT_S3C2400,
1211 .fifosize = 16,
1212 .rx_fifomask = S3C2410_UFSTAT_RXMASK,
1213 .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
1214 .rx_fifofull = S3C2410_UFSTAT_RXFULL,
1215 .tx_fifofull = S3C2410_UFSTAT_TXFULL,
1216 .tx_fifomask = S3C2410_UFSTAT_TXMASK,
1217 .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
1218 .get_clksrc = s3c2400_serial_getsource,
1219 .set_clksrc = s3c2400_serial_setsource,
1220 .reset_port = s3c2400_serial_resetport,
1221};
1222
1223static int s3c2400_serial_probe(struct platform_device *dev)
1224{
1225 return s3c24xx_serial_probe(dev, &s3c2400_uart_inf);
1226}
1227
1228static struct platform_driver s3c2400_serial_drv = {
1229 .probe = s3c2400_serial_probe,
1230 .remove = s3c24xx_serial_remove,
1231 .suspend = s3c24xx_serial_suspend,
1232 .resume = s3c24xx_serial_resume,
1233 .driver = {
1234 .name = "s3c2400-uart",
1235 .owner = THIS_MODULE,
1236 },
1237};
1238
1239static inline int s3c2400_serial_init(void)
1240{
1241 return s3c24xx_serial_init(&s3c2400_serial_drv, &s3c2400_uart_inf);
1242}
1243
1244static inline void s3c2400_serial_exit(void)
1245{
1246 platform_driver_unregister(&s3c2400_serial_drv);
1247}
1248
1249#define s3c2400_uart_inf_at &s3c2400_uart_inf
1250#else
1251
1252static inline int s3c2400_serial_init(void)
1253{
1254 return 0;
1255}
1256
1257static inline void s3c2400_serial_exit(void)
1258{
1259}
1260
1261#define s3c2400_uart_inf_at NULL
1262
1263#endif /* CONFIG_CPU_S3C2400 */
1264
1265/* S3C2410 support */
1266
1267#ifdef CONFIG_CPU_S3C2410
1268    28
1269    29	static int s3c2410_serial_setsource(struct uart_port *port,
1270    30					    struct s3c24xx_uart_clksrc *clk)
@@ -1323,8 +83,6 @@ static struct s3c24xx_uart_info s3c2410_uart_inf = {
1323    83		.reset_port	= s3c2410_serial_resetport,
1324    84	};
1325    85
1326		/* device management */
1327
1328    86	static int s3c2410_serial_probe(struct platform_device *dev)
1329    87	{
1330    88		return s3c24xx_serial_probe(dev, &s3c2410_uart_inf);
@@ -1333,612 +91,28 @@ static int s3c2410_serial_probe(struct platform_device *dev)
1333    91	static struct platform_driver s3c2410_serial_drv = {
1334    92		.probe		= s3c2410_serial_probe,
1335    93		.remove		= s3c24xx_serial_remove,
1336		.suspend	= s3c24xx_serial_suspend,
1337		.resume		= s3c24xx_serial_resume,
1338    94		.driver		= {
1339    95			.name	= "s3c2410-uart",
1340    96			.owner	= THIS_MODULE,
1341    97		},
1342    98	};
1343    99
1344		static inline int s3c2410_serial_init(void)
       100	s3c24xx_console_init(&s3c2410_serial_drv, &s3c2410_uart_inf);
       101
       102	static int __init s3c2410_serial_init(void)
1345   103	{
1346   104		return s3c24xx_serial_init(&s3c2410_serial_drv, &s3c2410_uart_inf);
1347   105	}
1348   106
1349		static inline void s3c2410_serial_exit(void)
       107	static void __exit s3c2410_serial_exit(void)
1350   108	{
1351   109		platform_driver_unregister(&s3c2410_serial_drv);
1352   110	}
1353   111
1354		#define s3c2410_uart_inf_at &s3c2410_uart_inf
       112	module_init(s3c2410_serial_init);
1355		#else
       113	module_exit(s3c2410_serial_exit);
1356
1357static inline int s3c2410_serial_init(void)
1358{
1359 return 0;
1360}
1361
1362static inline void s3c2410_serial_exit(void)
1363{
1364}
1365
1366#define s3c2410_uart_inf_at NULL
1367
1368#endif /* CONFIG_CPU_S3C2410 */
1369
1370#if defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2442)
1371
1372static int s3c2440_serial_setsource(struct uart_port *port,
1373 struct s3c24xx_uart_clksrc *clk)
1374{
1375 unsigned long ucon = rd_regl(port, S3C2410_UCON);
1376
1377 // todo - proper fclk<>nonfclk switch //
1378
1379 ucon &= ~S3C2440_UCON_CLKMASK;
1380
1381 if (strcmp(clk->name, "uclk") == 0)
1382 ucon |= S3C2440_UCON_UCLK;
1383 else if (strcmp(clk->name, "pclk") == 0)
1384 ucon |= S3C2440_UCON_PCLK;
1385 else if (strcmp(clk->name, "fclk") == 0)
1386 ucon |= S3C2440_UCON_FCLK;
1387 else {
1388 printk(KERN_ERR "unknown clock source %s\n", clk->name);
1389 return -EINVAL;
1390 }
1391
1392 wr_regl(port, S3C2410_UCON, ucon);
1393 return 0;
1394}
1395
1396
1397static int s3c2440_serial_getsource(struct uart_port *port,
1398 struct s3c24xx_uart_clksrc *clk)
1399{
1400 unsigned long ucon = rd_regl(port, S3C2410_UCON);
1401 unsigned long ucon0, ucon1, ucon2;
1402
1403 switch (ucon & S3C2440_UCON_CLKMASK) {
1404 case S3C2440_UCON_UCLK:
1405 clk->divisor = 1;
1406 clk->name = "uclk";
1407 break;
1408
1409 case S3C2440_UCON_PCLK:
1410 case S3C2440_UCON_PCLK2:
1411 clk->divisor = 1;
1412 clk->name = "pclk";
1413 break;
1414
1415 case S3C2440_UCON_FCLK:
1416 /* the fun of calculating the uart divisors on
1417 * the s3c2440 */
1418
1419 ucon0 = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UCON);
1420 ucon1 = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UCON);
1421 ucon2 = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UCON);
1422
1423 printk("ucons: %08lx, %08lx, %08lx\n", ucon0, ucon1, ucon2);
1424
1425 ucon0 &= S3C2440_UCON0_DIVMASK;
1426 ucon1 &= S3C2440_UCON1_DIVMASK;
1427 ucon2 &= S3C2440_UCON2_DIVMASK;
1428
1429 if (ucon0 != 0) {
1430 clk->divisor = ucon0 >> S3C2440_UCON_DIVSHIFT;
1431 clk->divisor += 6;
1432 } else if (ucon1 != 0) {
1433 clk->divisor = ucon1 >> S3C2440_UCON_DIVSHIFT;
1434 clk->divisor += 21;
1435 } else if (ucon2 != 0) {
1436 clk->divisor = ucon2 >> S3C2440_UCON_DIVSHIFT;
1437 clk->divisor += 36;
1438 } else {
1439 /* manual calims 44, seems to be 9 */
1440 clk->divisor = 9;
1441 }
1442
1443 clk->name = "fclk";
1444 break;
1445 }
1446
1447 return 0;
1448}
1449
1450static int s3c2440_serial_resetport(struct uart_port *port,
1451 struct s3c2410_uartcfg *cfg)
1452{
1453 unsigned long ucon = rd_regl(port, S3C2410_UCON);
1454
1455 dbg("s3c2440_serial_resetport: port=%p (%08lx), cfg=%p\n",
1456 port, port->mapbase, cfg);
1457
1458 /* ensure we don't change the clock settings... */
1459
1460 ucon &= (S3C2440_UCON0_DIVMASK | (3<<10));
1461
1462 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
1463 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
1464
1465 /* reset both fifos */
1466
1467 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
1468 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
1469
1470 return 0;
1471}
1472
1473static struct s3c24xx_uart_info s3c2440_uart_inf = {
1474 .name = "Samsung S3C2440 UART",
1475 .type = PORT_S3C2440,
1476 .fifosize = 64,
1477 .rx_fifomask = S3C2440_UFSTAT_RXMASK,
1478 .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
1479 .rx_fifofull = S3C2440_UFSTAT_RXFULL,
1480 .tx_fifofull = S3C2440_UFSTAT_TXFULL,
1481 .tx_fifomask = S3C2440_UFSTAT_TXMASK,
1482 .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
1483 .get_clksrc = s3c2440_serial_getsource,
1484 .set_clksrc = s3c2440_serial_setsource,
1485 .reset_port = s3c2440_serial_resetport,
1486};
1487
1488/* device management */
1489
1490static int s3c2440_serial_probe(struct platform_device *dev)
1491{
1492 dbg("s3c2440_serial_probe: dev=%p\n", dev);
1493 return s3c24xx_serial_probe(dev, &s3c2440_uart_inf);
1494}
1495
1496static struct platform_driver s3c2440_serial_drv = {
1497 .probe = s3c2440_serial_probe,
1498 .remove = s3c24xx_serial_remove,
1499 .suspend = s3c24xx_serial_suspend,
1500 .resume = s3c24xx_serial_resume,
1501 .driver = {
1502 .name = "s3c2440-uart",
1503 .owner = THIS_MODULE,
1504 },
1505};
1506
1507
1508static inline int s3c2440_serial_init(void)
1509{
1510 return s3c24xx_serial_init(&s3c2440_serial_drv, &s3c2440_uart_inf);
1511}
1512
1513static inline void s3c2440_serial_exit(void)
1514{
1515 platform_driver_unregister(&s3c2440_serial_drv);
1516}
1517
1518#define s3c2440_uart_inf_at &s3c2440_uart_inf
1519#else
1520
1521static inline int s3c2440_serial_init(void)
1522{
1523 return 0;
1524}
1525
1526static inline void s3c2440_serial_exit(void)
1527{
1528}
1529
1530#define s3c2440_uart_inf_at NULL
1531#endif /* CONFIG_CPU_S3C2440 */
1532
1533#if defined(CONFIG_CPU_S3C2412)
1534
1535static int s3c2412_serial_setsource(struct uart_port *port,
1536 struct s3c24xx_uart_clksrc *clk)
1537{
1538 unsigned long ucon = rd_regl(port, S3C2410_UCON);
1539
1540 ucon &= ~S3C2412_UCON_CLKMASK;
1541
1542 if (strcmp(clk->name, "uclk") == 0)
1543 ucon |= S3C2440_UCON_UCLK;
1544 else if (strcmp(clk->name, "pclk") == 0)
1545 ucon |= S3C2440_UCON_PCLK;
1546 else if (strcmp(clk->name, "usysclk") == 0)
1547 ucon |= S3C2412_UCON_USYSCLK;
1548 else {
1549 printk(KERN_ERR "unknown clock source %s\n", clk->name);
1550 return -EINVAL;
1551 }
1552
1553 wr_regl(port, S3C2410_UCON, ucon);
1554 return 0;
1555}
1556
1557
1558static int s3c2412_serial_getsource(struct uart_port *port,
1559 struct s3c24xx_uart_clksrc *clk)
1560{
1561 unsigned long ucon = rd_regl(port, S3C2410_UCON);
1562
1563 switch (ucon & S3C2412_UCON_CLKMASK) {
1564 case S3C2412_UCON_UCLK:
1565 clk->divisor = 1;
1566 clk->name = "uclk";
1567 break;
1568
1569 case S3C2412_UCON_PCLK:
1570 case S3C2412_UCON_PCLK2:
1571 clk->divisor = 1;
1572 clk->name = "pclk";
1573 break;
1574
1575 case S3C2412_UCON_USYSCLK:
1576 clk->divisor = 1;
1577 clk->name = "usysclk";
1578 break;
1579 }
1580
1581 return 0;
1582}
1583
1584static int s3c2412_serial_resetport(struct uart_port *port,
1585 struct s3c2410_uartcfg *cfg)
1586{
1587 unsigned long ucon = rd_regl(port, S3C2410_UCON);
1588
1589 dbg("%s: port=%p (%08lx), cfg=%p\n",
1590 __func__, port, port->mapbase, cfg);
1591
1592 /* ensure we don't change the clock settings... */
1593
1594 ucon &= S3C2412_UCON_CLKMASK;
1595
1596 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
1597 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
1598
1599 /* reset both fifos */
1600
1601 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
1602 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
1603
1604 return 0;
1605}
1606
1607static struct s3c24xx_uart_info s3c2412_uart_inf = {
1608 .name = "Samsung S3C2412 UART",
1609 .type = PORT_S3C2412,
1610 .fifosize = 64,
1611 .rx_fifomask = S3C2440_UFSTAT_RXMASK,
1612 .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
1613 .rx_fifofull = S3C2440_UFSTAT_RXFULL,
1614 .tx_fifofull = S3C2440_UFSTAT_TXFULL,
1615 .tx_fifomask = S3C2440_UFSTAT_TXMASK,
1616 .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
1617 .get_clksrc = s3c2412_serial_getsource,
1618 .set_clksrc = s3c2412_serial_setsource,
1619 .reset_port = s3c2412_serial_resetport,
1620};
1621
1622/* device management */
1623
1624static int s3c2412_serial_probe(struct platform_device *dev)
1625{
1626 dbg("s3c2440_serial_probe: dev=%p\n", dev);
1627 return s3c24xx_serial_probe(dev, &s3c2412_uart_inf);
1628}
1629
1630static struct platform_driver s3c2412_serial_drv = {
1631 .probe = s3c2412_serial_probe,
1632 .remove = s3c24xx_serial_remove,
1633 .suspend = s3c24xx_serial_suspend,
1634 .resume = s3c24xx_serial_resume,
1635 .driver = {
1636 .name = "s3c2412-uart",
1637 .owner = THIS_MODULE,
1638 },
1639};
1640
1641
1642static inline int s3c2412_serial_init(void)
1643{
1644 return s3c24xx_serial_init(&s3c2412_serial_drv, &s3c2412_uart_inf);
1645}
1646
1647static inline void s3c2412_serial_exit(void)
1648{
1649 platform_driver_unregister(&s3c2412_serial_drv);
1650}
1651
1652#define s3c2412_uart_inf_at &s3c2412_uart_inf
1653#else
1654
1655static inline int s3c2412_serial_init(void)
1656{
1657 return 0;
1658}
1659
1660static inline void s3c2412_serial_exit(void)
1661{
1662}
1663
1664#define s3c2412_uart_inf_at NULL
1665#endif /* CONFIG_CPU_S3C2440 */
1666
1667
1668/* module initialisation code */
1669
1670static int __init s3c24xx_serial_modinit(void)
1671{
1672 int ret;
1673
1674 ret = uart_register_driver(&s3c24xx_uart_drv);
1675 if (ret < 0) {
1676 printk(KERN_ERR "failed to register UART driver\n");
1677 return -1;
1678 }
1679
1680 s3c2400_serial_init();
1681 s3c2410_serial_init();
1682 s3c2412_serial_init();
1683 s3c2440_serial_init();
1684
1685 return 0;
1686}
1687
1688static void __exit s3c24xx_serial_modexit(void)
1689{
1690 s3c2400_serial_exit();
1691 s3c2410_serial_exit();
1692 s3c2412_serial_exit();
1693 s3c2440_serial_exit();
1694
1695 uart_unregister_driver(&s3c24xx_uart_drv);
1696}
1697
1698
1699module_init(s3c24xx_serial_modinit);
1700module_exit(s3c24xx_serial_modexit);
1701
1702/* Console code */
1703
1704#ifdef CONFIG_SERIAL_S3C2410_CONSOLE
1705
1706static struct uart_port *cons_uart;
1707
1708static int
1709s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon)
1710{
1711 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
1712 unsigned long ufstat, utrstat;
1713
1714 if (ufcon & S3C2410_UFCON_FIFOMODE) {
1715 /* fifo mode - check ammount of data in fifo registers... */
1716
1717 ufstat = rd_regl(port, S3C2410_UFSTAT);
1718 return (ufstat & info->tx_fifofull) ? 0 : 1;
1719 }
1720
1721 /* in non-fifo mode, we go and use the tx buffer empty */
1722
1723 utrstat = rd_regl(port, S3C2410_UTRSTAT);
1724 return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0;
1725}
1726
1727static void
1728s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
1729{
1730 unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
1731 while (!s3c24xx_serial_console_txrdy(port, ufcon))
1732 barrier();
1733 wr_regb(cons_uart, S3C2410_UTXH, ch);
1734}
1735
1736static void
1737s3c24xx_serial_console_write(struct console *co, const char *s,
1738 unsigned int count)
1739{
1740 uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
1741}
1742
1743static void __init
1744s3c24xx_serial_get_options(struct uart_port *port, int *baud,
1745 int *parity, int *bits)
1746{
1747 struct s3c24xx_uart_clksrc clksrc;
1748 struct clk *clk;
1749 unsigned int ulcon;
1750 unsigned int ucon;
1751 unsigned int ubrdiv;
1752 unsigned long rate;
1753
1754 ulcon = rd_regl(port, S3C2410_ULCON);
1755 ucon = rd_regl(port, S3C2410_UCON);
1756 ubrdiv = rd_regl(port, S3C2410_UBRDIV);
1757
1758 dbg("s3c24xx_serial_get_options: port=%p\n"
1759 "registers: ulcon=%08x, ucon=%08x, ubdriv=%08x\n",
1760 port, ulcon, ucon, ubrdiv);
1761
1762 if ((ucon & 0xf) != 0) {
1763 /* consider the serial port configured if the tx/rx mode set */
1764
1765 switch (ulcon & S3C2410_LCON_CSMASK) {
1766 case S3C2410_LCON_CS5:
1767 *bits = 5;
1768 break;
1769 case S3C2410_LCON_CS6:
1770 *bits = 6;
1771 break;
1772 case S3C2410_LCON_CS7:
1773 *bits = 7;
1774 break;
1775 default:
1776 case S3C2410_LCON_CS8:
1777 *bits = 8;
1778 break;
1779 }
1780
1781 switch (ulcon & S3C2410_LCON_PMASK) {
1782 case S3C2410_LCON_PEVEN:
1783 *parity = 'e';
1784 break;
1785
1786 case S3C2410_LCON_PODD:
1787 *parity = 'o';
1788 break;
1789
1790 case S3C2410_LCON_PNONE:
1791 default:
1792 *parity = 'n';
1793 }
1794
1795 /* now calculate the baud rate */
1796
1797 s3c24xx_serial_getsource(port, &clksrc);
1798
1799 clk = clk_get(port->dev, clksrc.name);
1800 if (!IS_ERR(clk) && clk != NULL)
1801 rate = clk_get_rate(clk) / clksrc.divisor;
1802 else
1803 rate = 1;
1804
1805
1806 *baud = rate / ( 16 * (ubrdiv + 1));
1807 dbg("calculated baud %d\n", *baud);
1808 }
1809
1810}
1811
1812/* s3c24xx_serial_init_ports
1813 *
1814 * initialise the serial ports from the machine provided initialisation
1815 * data.
1816*/
1817
1818static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info *info)
1819{
1820 struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports;
1821 struct platform_device **platdev_ptr;
1822 int i;
1823
1824 dbg("s3c24xx_serial_init_ports: initialising ports...\n");
1825
1826 platdev_ptr = s3c24xx_uart_devs;
1827
1828 for (i = 0; i < NR_PORTS; i++, ptr++, platdev_ptr++) {
1829 s3c24xx_serial_init_port(ptr, info, *platdev_ptr);
1830 }
1831
1832 return 0;
1833}
1834
1835static int __init
1836s3c24xx_serial_console_setup(struct console *co, char *options)
1837{
1838 struct uart_port *port;
1839 int baud = 9600;
1840 int bits = 8;
1841 int parity = 'n';
1842 int flow = 'n';
1843
1844 dbg("s3c24xx_serial_console_setup: co=%p (%d), %s\n",
1845 co, co->index, options);
1846
1847 /* is this a valid port */
1848
1849 if (co->index == -1 || co->index >= NR_PORTS)
1850 co->index = 0;
1851
1852 port = &s3c24xx_serial_ports[co->index].port;
1853
1854 /* is the port configured? */
1855
1856 if (port->mapbase == 0x0) {
1857 co->index = 0;
1858 port = &s3c24xx_serial_ports[co->index].port;
1859 }
1860
1861 cons_uart = port;
1862
1863 dbg("s3c24xx_serial_console_setup: port=%p (%d)\n", port, co->index);
1864
1865 /*
1866 * Check whether an invalid uart number has been specified, and
1867 * if so, search for the first available port that does have
1868 * console support.
1869 */
1870 if (options)
1871 uart_parse_options(options, &baud, &parity, &bits, &flow);
1872 else
1873 s3c24xx_serial_get_options(port, &baud, &parity, &bits);
1874
1875 dbg("s3c24xx_serial_console_setup: baud %d\n", baud);
1876
1877 return uart_set_options(port, co, baud, parity, bits, flow);
1878}
1879
1880/* s3c24xx_serial_initconsole
1881 *
1882 * initialise the console from one of the uart drivers
1883*/
1884
1885static struct console s3c24xx_serial_console =
1886{
1887 .name = S3C24XX_SERIAL_NAME,
1888 .device = uart_console_device,
1889 .flags = CON_PRINTBUFFER,
1890 .index = -1,
1891 .write = s3c24xx_serial_console_write,
1892 .setup = s3c24xx_serial_console_setup
1893};
1894
1895static int s3c24xx_serial_initconsole(void)
1896{
1897 struct s3c24xx_uart_info *info;
1898 struct platform_device *dev = s3c24xx_uart_devs[0];
1899
1900 dbg("s3c24xx_serial_initconsole\n");
1901
1902 /* select driver based on the cpu */
1903
1904 if (dev == NULL) {
1905 printk(KERN_ERR "s3c24xx: no devices for console init\n");
1906 return 0;
1907 }
1908
1909 if (strcmp(dev->name, "s3c2400-uart") == 0) {
1910 info = s3c2400_uart_inf_at;
1911 } else if (strcmp(dev->name, "s3c2410-uart") == 0) {
1912 info = s3c2410_uart_inf_at;
1913 } else if (strcmp(dev->name, "s3c2440-uart") == 0) {
1914 info = s3c2440_uart_inf_at;
1915 } else if (strcmp(dev->name, "s3c2412-uart") == 0) {
1916 info = s3c2412_uart_inf_at;
1917 } else {
1918 printk(KERN_ERR "s3c24xx: no driver for %s\n", dev->name);
1919 return 0;
1920 }
1921
1922 if (info == NULL) {
1923 printk(KERN_ERR "s3c24xx: no driver for console\n");
1924 return 0;
1925 }
1926
1927 s3c24xx_serial_console.data = &s3c24xx_uart_drv;
1928 s3c24xx_serial_init_ports(info);
1929
1930 register_console(&s3c24xx_serial_console);
1931 return 0;
1932}
1933
1934console_initcall(s3c24xx_serial_initconsole);
1935
1936#endif /* CONFIG_SERIAL_S3C2410_CONSOLE */
1937   114
1938		MODULE_LICENSE("GPL");
       115	MODULE_LICENSE("GPL v2");
1939   116	MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
1940		MODULE_DESCRIPTION("Samsung S3C2410/S3C2440/S3C2412 Serial port driver");
       117	MODULE_DESCRIPTION("Samsung S3C2410 SoC Serial port driver");
1941		MODULE_ALIAS("platform:s3c2400-uart");
1942   118	MODULE_ALIAS("platform:s3c2410-uart");
1943		MODULE_ALIAS("platform:s3c2412-uart");
1944		MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c
new file mode 100644
index 000000000000..ce0c220e3e92
--- /dev/null
+++ b/drivers/serial/s3c2412.c
@@ -0,0 +1,151 @@
1/* linux/drivers/serial/s3c2412.c
2 *
3 * Driver for Samsung S3C2412 and S3C2413 SoC onboard UARTs.
4 *
5 * Ben Dooks, Copyright (c) 2003-2005,2008 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/module.h>
14#include <linux/ioport.h>
15#include <linux/io.h>
16#include <linux/platform_device.h>
17#include <linux/init.h>
18#include <linux/serial_core.h>
19#include <linux/serial.h>
20
21#include <asm/irq.h>
22#include <asm/hardware.h>
23
24#include <asm/plat-s3c/regs-serial.h>
25#include <asm/arch/regs-gpio.h>
26
27#include "samsung.h"
28
29static int s3c2412_serial_setsource(struct uart_port *port,
30 struct s3c24xx_uart_clksrc *clk)
31{
32 unsigned long ucon = rd_regl(port, S3C2410_UCON);
33
34 ucon &= ~S3C2412_UCON_CLKMASK;
35
36 if (strcmp(clk->name, "uclk") == 0)
37 ucon |= S3C2440_UCON_UCLK;
38 else if (strcmp(clk->name, "pclk") == 0)
39 ucon |= S3C2440_UCON_PCLK;
40 else if (strcmp(clk->name, "usysclk") == 0)
41 ucon |= S3C2412_UCON_USYSCLK;
42 else {
43 printk(KERN_ERR "unknown clock source %s\n", clk->name);
44 return -EINVAL;
45 }
46
47 wr_regl(port, S3C2410_UCON, ucon);
48 return 0;
49}
50
51
52static int s3c2412_serial_getsource(struct uart_port *port,
53 struct s3c24xx_uart_clksrc *clk)
54{
55 unsigned long ucon = rd_regl(port, S3C2410_UCON);
56
57 switch (ucon & S3C2412_UCON_CLKMASK) {
58 case S3C2412_UCON_UCLK:
59 clk->divisor = 1;
60 clk->name = "uclk";
61 break;
62
63 case S3C2412_UCON_PCLK:
64 case S3C2412_UCON_PCLK2:
65 clk->divisor = 1;
66 clk->name = "pclk";
67 break;
68
69 case S3C2412_UCON_USYSCLK:
70 clk->divisor = 1;
71 clk->name = "usysclk";
72 break;
73 }
74
75 return 0;
76}
77
78static int s3c2412_serial_resetport(struct uart_port *port,
79 struct s3c2410_uartcfg *cfg)
80{
81 unsigned long ucon = rd_regl(port, S3C2410_UCON);
82
83 dbg("%s: port=%p (%08lx), cfg=%p\n",
84 __func__, port, port->mapbase, cfg);
85
86 /* ensure we don't change the clock settings... */
87
88 ucon &= S3C2412_UCON_CLKMASK;
89
90 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
91 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
92
93 /* reset both fifos */
94
95 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
96 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
97
98 return 0;
99}
100
101static struct s3c24xx_uart_info s3c2412_uart_inf = {
102 .name = "Samsung S3C2412 UART",
103 .type = PORT_S3C2412,
104 .fifosize = 64,
105 .rx_fifomask = S3C2440_UFSTAT_RXMASK,
106 .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
107 .rx_fifofull = S3C2440_UFSTAT_RXFULL,
108 .tx_fifofull = S3C2440_UFSTAT_TXFULL,
109 .tx_fifomask = S3C2440_UFSTAT_TXMASK,
110 .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
111 .get_clksrc = s3c2412_serial_getsource,
112 .set_clksrc = s3c2412_serial_setsource,
113 .reset_port = s3c2412_serial_resetport,
114};
115
116/* device management */
117
118static int s3c2412_serial_probe(struct platform_device *dev)
119{
120	dbg("s3c2412_serial_probe: dev=%p\n", dev);
121 return s3c24xx_serial_probe(dev, &s3c2412_uart_inf);
122}
123
124static struct platform_driver s3c2412_serial_drv = {
125 .probe = s3c2412_serial_probe,
126 .remove = s3c24xx_serial_remove,
127 .driver = {
128 .name = "s3c2412-uart",
129 .owner = THIS_MODULE,
130 },
131};
132
133s3c24xx_console_init(&s3c2412_serial_drv, &s3c2412_uart_inf);
134
135static inline int s3c2412_serial_init(void)
136{
137 return s3c24xx_serial_init(&s3c2412_serial_drv, &s3c2412_uart_inf);
138}
139
140static inline void s3c2412_serial_exit(void)
141{
142 platform_driver_unregister(&s3c2412_serial_drv);
143}
144
145module_init(s3c2412_serial_init);
146module_exit(s3c2412_serial_exit);
147
148MODULE_DESCRIPTION("Samsung S3C2412,S3C2413 SoC Serial port driver");
149MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
150MODULE_LICENSE("GPL v2");
151MODULE_ALIAS("platform:s3c2412-uart");
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
new file mode 100644
index 000000000000..38f954bd39c6
--- /dev/null
+++ b/drivers/serial/s3c2440.c
@@ -0,0 +1,181 @@
1/* linux/drivers/serial/s3c2440.c
2 *
3 * Driver for Samsung S3C2440 and S3C2442 SoC onboard UARTs.
4 *
5 * Ben Dooks, Copyright (c) 2003-2005,2008 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/module.h>
14#include <linux/ioport.h>
15#include <linux/io.h>
16#include <linux/platform_device.h>
17#include <linux/init.h>
18#include <linux/serial_core.h>
19#include <linux/serial.h>
20
21#include <asm/irq.h>
22#include <asm/hardware.h>
23
24#include <asm/plat-s3c/regs-serial.h>
25#include <asm/arch/regs-gpio.h>
26
27#include "samsung.h"
28
29
30static int s3c2440_serial_setsource(struct uart_port *port,
31 struct s3c24xx_uart_clksrc *clk)
32{
33 unsigned long ucon = rd_regl(port, S3C2410_UCON);
34
35 /* todo - proper fclk<>nonfclk switch. */
36
37 ucon &= ~S3C2440_UCON_CLKMASK;
38
39 if (strcmp(clk->name, "uclk") == 0)
40 ucon |= S3C2440_UCON_UCLK;
41 else if (strcmp(clk->name, "pclk") == 0)
42 ucon |= S3C2440_UCON_PCLK;
43 else if (strcmp(clk->name, "fclk") == 0)
44 ucon |= S3C2440_UCON_FCLK;
45 else {
46 printk(KERN_ERR "unknown clock source %s\n", clk->name);
47 return -EINVAL;
48 }
49
50 wr_regl(port, S3C2410_UCON, ucon);
51 return 0;
52}
53
54
55static int s3c2440_serial_getsource(struct uart_port *port,
56 struct s3c24xx_uart_clksrc *clk)
57{
58 unsigned long ucon = rd_regl(port, S3C2410_UCON);
59 unsigned long ucon0, ucon1, ucon2;
60
61 switch (ucon & S3C2440_UCON_CLKMASK) {
62 case S3C2440_UCON_UCLK:
63 clk->divisor = 1;
64 clk->name = "uclk";
65 break;
66
67 case S3C2440_UCON_PCLK:
68 case S3C2440_UCON_PCLK2:
69 clk->divisor = 1;
70 clk->name = "pclk";
71 break;
72
73 case S3C2440_UCON_FCLK:
74 /* the fun of calculating the uart divisors on
75 * the s3c2440 */
76
77 ucon0 = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UCON);
78 ucon1 = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UCON);
79 ucon2 = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UCON);
80
81 printk("ucons: %08lx, %08lx, %08lx\n", ucon0, ucon1, ucon2);
82
83 ucon0 &= S3C2440_UCON0_DIVMASK;
84 ucon1 &= S3C2440_UCON1_DIVMASK;
85 ucon2 &= S3C2440_UCON2_DIVMASK;
86
87 if (ucon0 != 0) {
88 clk->divisor = ucon0 >> S3C2440_UCON_DIVSHIFT;
89 clk->divisor += 6;
90 } else if (ucon1 != 0) {
91 clk->divisor = ucon1 >> S3C2440_UCON_DIVSHIFT;
92 clk->divisor += 21;
93 } else if (ucon2 != 0) {
94 clk->divisor = ucon2 >> S3C2440_UCON_DIVSHIFT;
95 clk->divisor += 36;
96 } else {
97			/* manual claims 44, seems to be 9 */
98 clk->divisor = 9;
99 }
100
101 clk->name = "fclk";
102 break;
103 }
104
105 return 0;
106}
107
108static int s3c2440_serial_resetport(struct uart_port *port,
109 struct s3c2410_uartcfg *cfg)
110{
111 unsigned long ucon = rd_regl(port, S3C2410_UCON);
112
113 dbg("s3c2440_serial_resetport: port=%p (%08lx), cfg=%p\n",
114 port, port->mapbase, cfg);
115
116 /* ensure we don't change the clock settings... */
117
118 ucon &= (S3C2440_UCON0_DIVMASK | (3<<10));
119
120 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
121 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
122
123 /* reset both fifos */
124
125 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
126 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
127
128 return 0;
129}
130
131static struct s3c24xx_uart_info s3c2440_uart_inf = {
132 .name = "Samsung S3C2440 UART",
133 .type = PORT_S3C2440,
134 .fifosize = 64,
135 .rx_fifomask = S3C2440_UFSTAT_RXMASK,
136 .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
137 .rx_fifofull = S3C2440_UFSTAT_RXFULL,
138 .tx_fifofull = S3C2440_UFSTAT_TXFULL,
139 .tx_fifomask = S3C2440_UFSTAT_TXMASK,
140 .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
141 .get_clksrc = s3c2440_serial_getsource,
142 .set_clksrc = s3c2440_serial_setsource,
143 .reset_port = s3c2440_serial_resetport,
144};
145
146/* device management */
147
148static int s3c2440_serial_probe(struct platform_device *dev)
149{
150 dbg("s3c2440_serial_probe: dev=%p\n", dev);
151 return s3c24xx_serial_probe(dev, &s3c2440_uart_inf);
152}
153
154static struct platform_driver s3c2440_serial_drv = {
155 .probe = s3c2440_serial_probe,
156 .remove = s3c24xx_serial_remove,
157 .driver = {
158 .name = "s3c2440-uart",
159 .owner = THIS_MODULE,
160 },
161};
162
163s3c24xx_console_init(&s3c2440_serial_drv, &s3c2440_uart_inf);
164
165static int __init s3c2440_serial_init(void)
166{
167 return s3c24xx_serial_init(&s3c2440_serial_drv, &s3c2440_uart_inf);
168}
169
170static void __exit s3c2440_serial_exit(void)
171{
172 platform_driver_unregister(&s3c2440_serial_drv);
173}
174
175module_init(s3c2440_serial_init);
176module_exit(s3c2440_serial_exit);
177
178MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
179MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
180MODULE_LICENSE("GPL v2");
181MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
new file mode 100644
index 000000000000..4a3ecaa629e6
--- /dev/null
+++ b/drivers/serial/samsung.c
@@ -0,0 +1,1317 @@
1/* linux/drivers/serial/samsung.c
2 *
3 * Driver core for Samsung SoC onboard UARTs.
4 *
5 * Ben Dooks, Copyright (c) 2003-2005,2008 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13/* Note on 2410 error handling
14 *
15 * The s3c2410 manual has a love/hate affair with the contents of the
16 * UERSTAT register in the UART blocks, and keeps marking some of the
17 * error bits as reserved. Having checked with the s3c2410x01,
18 * it copes with BREAKs properly, so I am happy to ignore the RESERVED
19 * feature from the later versions of the manual.
20 *
21 * If it becomes apparent that later versions of the 2410 remove these
22 * bits, then action will have to be taken to differentiate the versions
23 * and change the policy on BREAK
24 *
25 * BJD, 04-Nov-2004
26*/
27
28#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
29#define SUPPORT_SYSRQ
30#endif
31
32#include <linux/module.h>
33#include <linux/ioport.h>
34#include <linux/io.h>
35#include <linux/platform_device.h>
36#include <linux/init.h>
37#include <linux/sysrq.h>
38#include <linux/console.h>
39#include <linux/tty.h>
40#include <linux/tty_flip.h>
41#include <linux/serial_core.h>
42#include <linux/serial.h>
43#include <linux/delay.h>
44#include <linux/clk.h>
45
46#include <asm/irq.h>
47
48#include <asm/hardware.h>
49
50#include <asm/plat-s3c/regs-serial.h>
51#include <asm/arch/regs-gpio.h>
52
53#include "samsung.h"
54
55/* UART name and device definitions */
56
57#define S3C24XX_SERIAL_NAME "ttySAC"
58#define S3C24XX_SERIAL_MAJOR 204
59#define S3C24XX_SERIAL_MINOR 64
60
61/* we can support 3 uarts, but do not always use all of them */
62
63#ifdef CONFIG_CPU_S3C2400
64#define NR_PORTS (2)
65#else
66#define NR_PORTS (3)
67#endif
68
69/* port irq numbers */
70
71#define TX_IRQ(port) ((port)->irq + 1)
72#define RX_IRQ(port) ((port)->irq)
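/*
 * The irq supplied by the platform resources is the RX interrupt; the TX
 * interrupt is assumed here to be the very next interrupt number, which is
 * why TX_IRQ() simply adds one to it.
 */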
73
74/* macros to change one thing to another */
75
76#define tx_enabled(port) ((port)->unused[0])
77#define rx_enabled(port) ((port)->unused[1])
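/*
 * tx_enabled()/rx_enabled() keep the driver's soft TX/RX enable state in the
 * otherwise spare port->unused[] words of struct uart_port, so they can be
 * assigned to like ordinary flags without adding fields to s3c24xx_uart_port.
 */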
78
79/* flag to ignore all characters coming in */
80#define RXSTAT_DUMMY_READ (0x10000000)
81
82static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port)
83{
84 return container_of(port, struct s3c24xx_uart_port, port);
85}
86
87/* translate a port to the device name */
88
89static inline const char *s3c24xx_serial_portname(struct uart_port *port)
90{
91 return to_platform_device(port->dev)->name;
92}
93
94static int s3c24xx_serial_txempty_nofifo(struct uart_port *port)
95{
96 return (rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE);
97}
98
99static void s3c24xx_serial_rx_enable(struct uart_port *port)
100{
101 unsigned long flags;
102 unsigned int ucon, ufcon;
103 int count = 10000;
104
105 spin_lock_irqsave(&port->lock, flags);
106
107 while (--count && !s3c24xx_serial_txempty_nofifo(port))
108 udelay(100);
109
110 ufcon = rd_regl(port, S3C2410_UFCON);
111 ufcon |= S3C2410_UFCON_RESETRX;
112 wr_regl(port, S3C2410_UFCON, ufcon);
113
114 ucon = rd_regl(port, S3C2410_UCON);
115 ucon |= S3C2410_UCON_RXIRQMODE;
116 wr_regl(port, S3C2410_UCON, ucon);
117
118 rx_enabled(port) = 1;
119 spin_unlock_irqrestore(&port->lock, flags);
120}
121
122static void s3c24xx_serial_rx_disable(struct uart_port *port)
123{
124 unsigned long flags;
125 unsigned int ucon;
126
127 spin_lock_irqsave(&port->lock, flags);
128
129 ucon = rd_regl(port, S3C2410_UCON);
130 ucon &= ~S3C2410_UCON_RXIRQMODE;
131 wr_regl(port, S3C2410_UCON, ucon);
132
133 rx_enabled(port) = 0;
134 spin_unlock_irqrestore(&port->lock, flags);
135}
136
137static void s3c24xx_serial_stop_tx(struct uart_port *port)
138{
139 if (tx_enabled(port)) {
140 disable_irq(TX_IRQ(port));
141 tx_enabled(port) = 0;
142 if (port->flags & UPF_CONS_FLOW)
143 s3c24xx_serial_rx_enable(port);
144 }
145}
146
147static void s3c24xx_serial_start_tx(struct uart_port *port)
148{
149 if (!tx_enabled(port)) {
150 if (port->flags & UPF_CONS_FLOW)
151 s3c24xx_serial_rx_disable(port);
152
153 enable_irq(TX_IRQ(port));
154 tx_enabled(port) = 1;
155 }
156}
157
158
159static void s3c24xx_serial_stop_rx(struct uart_port *port)
160{
161 if (rx_enabled(port)) {
162 dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
163 disable_irq(RX_IRQ(port));
164 rx_enabled(port) = 0;
165 }
166}
167
168static void s3c24xx_serial_enable_ms(struct uart_port *port)
169{
170}
171
172static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *port)
173{
174 return to_ourport(port)->info;
175}
176
177static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port)
178{
179 if (port->dev == NULL)
180 return NULL;
181
182 return (struct s3c2410_uartcfg *)port->dev->platform_data;
183}
184
185static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
186 unsigned long ufstat)
187{
188 struct s3c24xx_uart_info *info = ourport->info;
189
190 if (ufstat & info->rx_fifofull)
191 return info->fifosize;
192
193 return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
194}
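/*
 * The mask, shift and full-flag values come from the per-SoC
 * s3c24xx_uart_info, so the same helper serves the 16-byte FIFOs of the
 * S3C2410 and the 64-byte FIFOs of the S3C2412/S3C2440; the separate
 * rx_fifofull test is needed because the count field alone cannot
 * represent a completely full FIFO.
 */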
195
196
197/* ? - where has parity gone?? */
198#define S3C2410_UERSTAT_PARITY (0x1000)
199
200static irqreturn_t
201s3c24xx_serial_rx_chars(int irq, void *dev_id)
202{
203 struct s3c24xx_uart_port *ourport = dev_id;
204 struct uart_port *port = &ourport->port;
205 struct tty_struct *tty = port->info->tty;
206 unsigned int ufcon, ch, flag, ufstat, uerstat;
207 int max_count = 64;
208
209 while (max_count-- > 0) {
210 ufcon = rd_regl(port, S3C2410_UFCON);
211 ufstat = rd_regl(port, S3C2410_UFSTAT);
212
213 if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
214 break;
215
216 uerstat = rd_regl(port, S3C2410_UERSTAT);
217 ch = rd_regb(port, S3C2410_URXH);
218
219 if (port->flags & UPF_CONS_FLOW) {
220 int txe = s3c24xx_serial_txempty_nofifo(port);
221
222 if (rx_enabled(port)) {
223 if (!txe) {
224 rx_enabled(port) = 0;
225 continue;
226 }
227 } else {
228 if (txe) {
229 ufcon |= S3C2410_UFCON_RESETRX;
230 wr_regl(port, S3C2410_UFCON, ufcon);
231 rx_enabled(port) = 1;
232 goto out;
233 }
234 continue;
235 }
236 }
237
238 /* insert the character into the buffer */
239
240 flag = TTY_NORMAL;
241 port->icount.rx++;
242
243 if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
244 dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n",
245 ch, uerstat);
246
247 /* check for break */
248 if (uerstat & S3C2410_UERSTAT_BREAK) {
249 dbg("break!\n");
250 port->icount.brk++;
251 if (uart_handle_break(port))
252 goto ignore_char;
253 }
254
255 if (uerstat & S3C2410_UERSTAT_FRAME)
256 port->icount.frame++;
257 if (uerstat & S3C2410_UERSTAT_OVERRUN)
258 port->icount.overrun++;
259
260 uerstat &= port->read_status_mask;
261
262 if (uerstat & S3C2410_UERSTAT_BREAK)
263 flag = TTY_BREAK;
264 else if (uerstat & S3C2410_UERSTAT_PARITY)
265 flag = TTY_PARITY;
266 else if (uerstat & (S3C2410_UERSTAT_FRAME |
267 S3C2410_UERSTAT_OVERRUN))
268 flag = TTY_FRAME;
269 }
270
271 if (uart_handle_sysrq_char(port, ch))
272 goto ignore_char;
273
274 uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN,
275 ch, flag);
276
277 ignore_char:
278 continue;
279 }
280 tty_flip_buffer_push(tty);
281
282 out:
283 return IRQ_HANDLED;
284}
285
286static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
287{
288 struct s3c24xx_uart_port *ourport = id;
289 struct uart_port *port = &ourport->port;
290 struct circ_buf *xmit = &port->info->xmit;
291 int count = 256;
292
293 if (port->x_char) {
294 wr_regb(port, S3C2410_UTXH, port->x_char);
295 port->icount.tx++;
296 port->x_char = 0;
297 goto out;
298 }
299
300	/* if there isn't anything more to transmit, or the uart is now
301 * stopped, disable the uart and exit
302 */
303
304 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
305 s3c24xx_serial_stop_tx(port);
306 goto out;
307 }
308
309 /* try and drain the buffer... */
310
311 while (!uart_circ_empty(xmit) && count-- > 0) {
312 if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
313 break;
314
315 wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
316 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
317 port->icount.tx++;
318 }
319
320 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
321 uart_write_wakeup(port);
322
323 if (uart_circ_empty(xmit))
324 s3c24xx_serial_stop_tx(port);
325
326 out:
327 return IRQ_HANDLED;
328}
329
330static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
331{
332 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
333 unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT);
334 unsigned long ufcon = rd_regl(port, S3C2410_UFCON);
335
336 if (ufcon & S3C2410_UFCON_FIFOMODE) {
337 if ((ufstat & info->tx_fifomask) != 0 ||
338 (ufstat & info->tx_fifofull))
339 return 0;
340
341 return 1;
342 }
343
344 return s3c24xx_serial_txempty_nofifo(port);
345}
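/*
 * With the FIFO enabled the port only reports empty once both the TX count
 * field and the TX-full flag read zero; in non-FIFO mode the plain TXE bit
 * in UTRSTAT is used instead.
 */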
346
347/* no modem control lines */
348static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
349{
350 unsigned int umstat = rd_regb(port, S3C2410_UMSTAT);
351
352 if (umstat & S3C2410_UMSTAT_CTS)
353 return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
354 else
355 return TIOCM_CAR | TIOCM_DSR;
356}
357
358static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
359{
360 /* todo - possibly remove AFC and do manual CTS */
361}
362
363static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
364{
365 unsigned long flags;
366 unsigned int ucon;
367
368 spin_lock_irqsave(&port->lock, flags);
369
370 ucon = rd_regl(port, S3C2410_UCON);
371
372 if (break_state)
373 ucon |= S3C2410_UCON_SBREAK;
374 else
375 ucon &= ~S3C2410_UCON_SBREAK;
376
377 wr_regl(port, S3C2410_UCON, ucon);
378
379 spin_unlock_irqrestore(&port->lock, flags);
380}
381
382static void s3c24xx_serial_shutdown(struct uart_port *port)
383{
384 struct s3c24xx_uart_port *ourport = to_ourport(port);
385
386 if (ourport->tx_claimed) {
387 free_irq(TX_IRQ(port), ourport);
388 tx_enabled(port) = 0;
389 ourport->tx_claimed = 0;
390 }
391
392 if (ourport->rx_claimed) {
393 free_irq(RX_IRQ(port), ourport);
394 ourport->rx_claimed = 0;
395 rx_enabled(port) = 0;
396 }
397}
398
399
400static int s3c24xx_serial_startup(struct uart_port *port)
401{
402 struct s3c24xx_uart_port *ourport = to_ourport(port);
403 int ret;
404
405 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
406	    port, port->mapbase, port->membase);
407
408 rx_enabled(port) = 1;
409
410 ret = request_irq(RX_IRQ(port),
411 s3c24xx_serial_rx_chars, 0,
412 s3c24xx_serial_portname(port), ourport);
413
414 if (ret != 0) {
415 printk(KERN_ERR "cannot get irq %d\n", RX_IRQ(port));
416 return ret;
417 }
418
419 ourport->rx_claimed = 1;
420
421 dbg("requesting tx irq...\n");
422
423 tx_enabled(port) = 1;
424
425 ret = request_irq(TX_IRQ(port),
426 s3c24xx_serial_tx_chars, 0,
427 s3c24xx_serial_portname(port), ourport);
428
429 if (ret) {
430 printk(KERN_ERR "cannot get irq %d\n", TX_IRQ(port));
431 goto err;
432 }
433
434 ourport->tx_claimed = 1;
435
436 dbg("s3c24xx_serial_startup ok\n");
437
438 /* the port reset code should have done the correct
439 * register setup for the port controls */
440
441 return ret;
442
443 err:
444 s3c24xx_serial_shutdown(port);
445 return ret;
446}
447
448/* power management control */
449
450static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
451 unsigned int old)
452{
453 struct s3c24xx_uart_port *ourport = to_ourport(port);
454
455 switch (level) {
456 case 3:
457 if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
458 clk_disable(ourport->baudclk);
459
460 clk_disable(ourport->clk);
461 break;
462
463 case 0:
464 clk_enable(ourport->clk);
465
466 if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
467 clk_enable(ourport->baudclk);
468
469 break;
470 default:
471 printk(KERN_ERR "s3c24xx_serial: unknown pm %d\n", level);
472 }
473}
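/*
 * The level argument follows the serial core's power states: 0 means the
 * port is being powered up for use, 3 means it is being shut down; the
 * intermediate levels are not used here, hence the default warning.
 */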
474
475/* baud rate calculation
476 *
477 * The UARTs on the S3C2410/S3C2440 can take their clocks from a number
478 * of different sources, including the peripheral clock ("pclk") and an
479 * external clock ("uclk"). The S3C2440 also adds the core clock ("fclk")
480 * with a programmable extra divisor.
481 *
482 * The following code goes through the clock sources, and calculates the
483 * baud clocks (and the resultant actual baud rates) and then tries to
484 * pick the closest one and select that.
485 *
486*/
487
488
489#define MAX_CLKS (8)
490
491static struct s3c24xx_uart_clksrc tmp_clksrc = {
492 .name = "pclk",
493 .min_baud = 0,
494 .max_baud = 0,
495 .divisor = 1,
496};
497
498static inline int
499s3c24xx_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
500{
501 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
502
503 return (info->get_clksrc)(port, c);
504}
505
506static inline int
507s3c24xx_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
508{
509 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
510
511 return (info->set_clksrc)(port, c);
512}
513
514struct baud_calc {
515 struct s3c24xx_uart_clksrc *clksrc;
516 unsigned int calc;
517 unsigned int quot;
518 struct clk *src;
519};
520
521static int s3c24xx_serial_calcbaud(struct baud_calc *calc,
522 struct uart_port *port,
523 struct s3c24xx_uart_clksrc *clksrc,
524 unsigned int baud)
525{
526 unsigned long rate;
527
528 calc->src = clk_get(port->dev, clksrc->name);
529 if (calc->src == NULL || IS_ERR(calc->src))
530 return 0;
531
532 rate = clk_get_rate(calc->src);
533 rate /= clksrc->divisor;
534
535 calc->clksrc = clksrc;
536 calc->quot = (rate + (8 * baud)) / (16 * baud);
537 calc->calc = (rate / (calc->quot * 16));
538
539 calc->quot--;
540 return 1;
541}
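/*
 * Worked example (made-up numbers): for a 50 MHz source clock and a
 * requested 115200 baud, quot = (50000000 + 8 * 115200) / (16 * 115200) = 27,
 * the achievable rate is 50000000 / (27 * 16) = 115740 baud, and the
 * decremented quot of 26 is what later ends up in UBRDIV.
 */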
542
543static unsigned int s3c24xx_serial_getclk(struct uart_port *port,
544 struct s3c24xx_uart_clksrc **clksrc,
545 struct clk **clk,
546 unsigned int baud)
547{
548 struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
549 struct s3c24xx_uart_clksrc *clkp;
550 struct baud_calc res[MAX_CLKS];
551 struct baud_calc *resptr, *best, *sptr;
552 int i;
553
554 clkp = cfg->clocks;
555 best = NULL;
556
557 if (cfg->clocks_size < 2) {
558 if (cfg->clocks_size == 0)
559 clkp = &tmp_clksrc;
560
561 /* check to see if we're sourcing fclk, and if so we're
562 * going to have to update the clock source
563 */
564
565 if (strcmp(clkp->name, "fclk") == 0) {
566 struct s3c24xx_uart_clksrc src;
567
568 s3c24xx_serial_getsource(port, &src);
569
570		 * check that the port is already using fclk, and if
571 * not, then re-select fclk
572 */
573
574 if (strcmp(src.name, clkp->name) == 0) {
575 s3c24xx_serial_setsource(port, clkp);
576 s3c24xx_serial_getsource(port, &src);
577 }
578
579 clkp->divisor = src.divisor;
580 }
581
582 s3c24xx_serial_calcbaud(res, port, clkp, baud);
583 best = res;
584 resptr = best + 1;
585 } else {
586 resptr = res;
587
588 for (i = 0; i < cfg->clocks_size; i++, clkp++) {
589 if (s3c24xx_serial_calcbaud(resptr, port, clkp, baud))
590 resptr++;
591 }
592 }
593
594 /* ok, we now need to select the best clock we found */
595
596 if (!best) {
597 unsigned int deviation = (1<<30)|((1<<30)-1);
598 int calc_deviation;
599
600 for (sptr = res; sptr < resptr; sptr++) {
601 calc_deviation = baud - sptr->calc;
602 if (calc_deviation < 0)
603 calc_deviation = -calc_deviation;
604
605 if (calc_deviation < deviation) {
606 best = sptr;
607 deviation = calc_deviation;
608 }
609 }
610 }
611
612 /* store results to pass back */
613
614 *clksrc = best->clksrc;
615 *clk = best->src;
616
617 return best->quot;
618}
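/*
 * Selection strategy: with at most one clock configured the single result is
 * used directly (after re-selecting fclk if the board asked for it); with
 * several candidates, the one whose achievable rate deviates least from the
 * requested baud rate wins.
 */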
619
620static void s3c24xx_serial_set_termios(struct uart_port *port,
621 struct ktermios *termios,
622 struct ktermios *old)
623{
624 struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
625 struct s3c24xx_uart_port *ourport = to_ourport(port);
626 struct s3c24xx_uart_clksrc *clksrc = NULL;
627 struct clk *clk = NULL;
628 unsigned long flags;
629 unsigned int baud, quot;
630 unsigned int ulcon;
631 unsigned int umcon;
632
633 /*
634 * We don't support modem control lines.
635 */
636 termios->c_cflag &= ~(HUPCL | CMSPAR);
637 termios->c_cflag |= CLOCAL;
638
639 /*
640 * Ask the core to calculate the divisor for us.
641 */
642
643 baud = uart_get_baud_rate(port, termios, old, 0, 115200*8);
644
645 if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
646 quot = port->custom_divisor;
647 else
648 quot = s3c24xx_serial_getclk(port, &clksrc, &clk, baud);
649
650 /* check to see if we need to change clock source */
651
652 if (ourport->clksrc != clksrc || ourport->baudclk != clk) {
653 s3c24xx_serial_setsource(port, clksrc);
654
655 if (ourport->baudclk != NULL && !IS_ERR(ourport->baudclk)) {
656 clk_disable(ourport->baudclk);
657 ourport->baudclk = NULL;
658 }
659
660 clk_enable(clk);
661
662 ourport->clksrc = clksrc;
663 ourport->baudclk = clk;
664 }
665
666 switch (termios->c_cflag & CSIZE) {
667 case CS5:
668 dbg("config: 5bits/char\n");
669 ulcon = S3C2410_LCON_CS5;
670 break;
671 case CS6:
672 dbg("config: 6bits/char\n");
673 ulcon = S3C2410_LCON_CS6;
674 break;
675 case CS7:
676 dbg("config: 7bits/char\n");
677 ulcon = S3C2410_LCON_CS7;
678 break;
679 case CS8:
680 default:
681 dbg("config: 8bits/char\n");
682 ulcon = S3C2410_LCON_CS8;
683 break;
684 }
685
686 /* preserve original lcon IR settings */
687 ulcon |= (cfg->ulcon & S3C2410_LCON_IRM);
688
689 if (termios->c_cflag & CSTOPB)
690 ulcon |= S3C2410_LCON_STOPB;
691
692 umcon = (termios->c_cflag & CRTSCTS) ? S3C2410_UMCOM_AFC : 0;
693
694 if (termios->c_cflag & PARENB) {
695 if (termios->c_cflag & PARODD)
696 ulcon |= S3C2410_LCON_PODD;
697 else
698 ulcon |= S3C2410_LCON_PEVEN;
699 } else {
700 ulcon |= S3C2410_LCON_PNONE;
701 }
702
703 spin_lock_irqsave(&port->lock, flags);
704
705 dbg("setting ulcon to %08x, brddiv to %d\n", ulcon, quot);
706
707 wr_regl(port, S3C2410_ULCON, ulcon);
708 wr_regl(port, S3C2410_UBRDIV, quot);
709 wr_regl(port, S3C2410_UMCON, umcon);
710
711 dbg("uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
712 rd_regl(port, S3C2410_ULCON),
713 rd_regl(port, S3C2410_UCON),
714 rd_regl(port, S3C2410_UFCON));
715
716 /*
717 * Update the per-port timeout.
718 */
719 uart_update_timeout(port, termios->c_cflag, baud);
720
721 /*
722 * Which character status flags are we interested in?
723 */
724 port->read_status_mask = S3C2410_UERSTAT_OVERRUN;
725 if (termios->c_iflag & INPCK)
726 port->read_status_mask |= S3C2410_UERSTAT_FRAME | S3C2410_UERSTAT_PARITY;
727
728 /*
729 * Which character status flags should we ignore?
730 */
731 port->ignore_status_mask = 0;
732 if (termios->c_iflag & IGNPAR)
733 port->ignore_status_mask |= S3C2410_UERSTAT_OVERRUN;
734 if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR)
735 port->ignore_status_mask |= S3C2410_UERSTAT_FRAME;
736
737 /*
738 * Ignore all characters if CREAD is not set.
739 */
740 if ((termios->c_cflag & CREAD) == 0)
741 port->ignore_status_mask |= RXSTAT_DUMMY_READ;
742
743 spin_unlock_irqrestore(&port->lock, flags);
744}
745
746static const char *s3c24xx_serial_type(struct uart_port *port)
747{
748 switch (port->type) {
749 case PORT_S3C2410:
750 return "S3C2410";
751 case PORT_S3C2440:
752 return "S3C2440";
753 case PORT_S3C2412:
754 return "S3C2412";
755 default:
756 return NULL;
757 }
758}
759
760#define MAP_SIZE (0x100)
761
762static void s3c24xx_serial_release_port(struct uart_port *port)
763{
764 release_mem_region(port->mapbase, MAP_SIZE);
765}
766
767static int s3c24xx_serial_request_port(struct uart_port *port)
768{
769 const char *name = s3c24xx_serial_portname(port);
770 return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
771}
772
773static void s3c24xx_serial_config_port(struct uart_port *port, int flags)
774{
775 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
776
777 if (flags & UART_CONFIG_TYPE &&
778 s3c24xx_serial_request_port(port) == 0)
779 port->type = info->type;
780}
781
782/*
783 * verify the new serial_struct (for TIOCSSERIAL).
784 */
785static int
786s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
787{
788 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
789
790 if (ser->type != PORT_UNKNOWN && ser->type != info->type)
791 return -EINVAL;
792
793 return 0;
794}
795
796
797#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
798
799static struct console s3c24xx_serial_console;
800
801#define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console
802#else
803#define S3C24XX_SERIAL_CONSOLE NULL
804#endif
805
806static struct uart_ops s3c24xx_serial_ops = {
807 .pm = s3c24xx_serial_pm,
808 .tx_empty = s3c24xx_serial_tx_empty,
809 .get_mctrl = s3c24xx_serial_get_mctrl,
810 .set_mctrl = s3c24xx_serial_set_mctrl,
811 .stop_tx = s3c24xx_serial_stop_tx,
812 .start_tx = s3c24xx_serial_start_tx,
813 .stop_rx = s3c24xx_serial_stop_rx,
814 .enable_ms = s3c24xx_serial_enable_ms,
815 .break_ctl = s3c24xx_serial_break_ctl,
816 .startup = s3c24xx_serial_startup,
817 .shutdown = s3c24xx_serial_shutdown,
818 .set_termios = s3c24xx_serial_set_termios,
819 .type = s3c24xx_serial_type,
820 .release_port = s3c24xx_serial_release_port,
821 .request_port = s3c24xx_serial_request_port,
822 .config_port = s3c24xx_serial_config_port,
823 .verify_port = s3c24xx_serial_verify_port,
824};
825
826
827static struct uart_driver s3c24xx_uart_drv = {
828 .owner = THIS_MODULE,
829 .dev_name = "s3c2410_serial",
830 .nr = 3,
831 .cons = S3C24XX_SERIAL_CONSOLE,
832 .driver_name = S3C24XX_SERIAL_NAME,
833 .major = S3C24XX_SERIAL_MAJOR,
834 .minor = S3C24XX_SERIAL_MINOR,
835};
836
837static struct s3c24xx_uart_port s3c24xx_serial_ports[NR_PORTS] = {
838 [0] = {
839 .port = {
840 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock),
841 .iotype = UPIO_MEM,
842 .irq = IRQ_S3CUART_RX0,
843 .uartclk = 0,
844 .fifosize = 16,
845 .ops = &s3c24xx_serial_ops,
846 .flags = UPF_BOOT_AUTOCONF,
847 .line = 0,
848 }
849 },
850 [1] = {
851 .port = {
852 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[1].port.lock),
853 .iotype = UPIO_MEM,
854 .irq = IRQ_S3CUART_RX1,
855 .uartclk = 0,
856 .fifosize = 16,
857 .ops = &s3c24xx_serial_ops,
858 .flags = UPF_BOOT_AUTOCONF,
859 .line = 1,
860 }
861 },
862#if NR_PORTS > 2
863
864 [2] = {
865 .port = {
866 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[2].port.lock),
867 .iotype = UPIO_MEM,
868 .irq = IRQ_S3CUART_RX2,
869 .uartclk = 0,
870 .fifosize = 16,
871 .ops = &s3c24xx_serial_ops,
872 .flags = UPF_BOOT_AUTOCONF,
873 .line = 2,
874 }
875 }
876#endif
877};
878
879/* s3c24xx_serial_resetport
880 *
881 * wrapper to call the specific reset for this port (reset the fifos
882 * and the settings)
883*/
884
885static inline int s3c24xx_serial_resetport(struct uart_port *port,
886 struct s3c2410_uartcfg *cfg)
887{
888 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
889
890 return (info->reset_port)(port, cfg);
891}
892
893/* s3c24xx_serial_init_port
894 *
895 * initialise a single serial port from the platform device given
896 */
897
898static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
899 struct s3c24xx_uart_info *info,
900 struct platform_device *platdev)
901{
902 struct uart_port *port = &ourport->port;
903 struct s3c2410_uartcfg *cfg;
904 struct resource *res;
905 int ret;
906
907 dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
908
909 if (platdev == NULL)
910 return -ENODEV;
911
912 cfg = s3c24xx_dev_to_cfg(&platdev->dev);
913
914 if (port->mapbase != 0)
915 return 0;
916
917 if (cfg->hwport > 3)
918 return -EINVAL;
919
920 /* setup info for port */
921 port->dev = &platdev->dev;
922 ourport->info = info;
923
924 /* copy the info in from provided structure */
925 ourport->port.fifosize = info->fifosize;
926
927 dbg("s3c24xx_serial_init_port: %p (hw %d)...\n", port, cfg->hwport);
928
929 port->uartclk = 1;
930
931 if (cfg->uart_flags & UPF_CONS_FLOW) {
932 dbg("s3c24xx_serial_init_port: enabling flow control\n");
933 port->flags |= UPF_CONS_FLOW;
934 }
935
936 /* sort out the physical and virtual addresses for each UART */
937
938 res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
939 if (res == NULL) {
940 printk(KERN_ERR "failed to find memory resource for uart\n");
941 return -EINVAL;
942 }
943
944 dbg("resource %p (%lx..%lx)\n", res, res->start, res->end);
945
946 port->mapbase = res->start;
947 port->membase = S3C24XX_VA_UART + (res->start - S3C24XX_PA_UART);
948 ret = platform_get_irq(platdev, 0);
949 if (ret < 0)
950 port->irq = 0;
951 else
952 port->irq = ret;
953
954 ourport->clk = clk_get(&platdev->dev, "uart");
955
956 dbg("port: map=%08lx, mem=%p, irq=%d, clock=%u\n",
957 port->mapbase, port->membase, port->irq, port->uartclk);
958
959 /* reset the fifos (and setup the uart) */
960 s3c24xx_serial_resetport(port, cfg);
961 return 0;
962}
963
964static ssize_t s3c24xx_serial_show_clksrc(struct device *dev,
965 struct device_attribute *attr,
966 char *buf)
967{
968 struct uart_port *port = s3c24xx_dev_to_port(dev);
969 struct s3c24xx_uart_port *ourport = to_ourport(port);
970
971 return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->clksrc->name);
972}
973
974static DEVICE_ATTR(clock_source, S_IRUGO, s3c24xx_serial_show_clksrc, NULL);
975
976/* Device driver serial port probe */
977
978static int probe_index;
979
980int s3c24xx_serial_probe(struct platform_device *dev,
981 struct s3c24xx_uart_info *info)
982{
983 struct s3c24xx_uart_port *ourport;
984 int ret;
985
986 dbg("s3c24xx_serial_probe(%p, %p) %d\n", dev, info, probe_index);
987
988 ourport = &s3c24xx_serial_ports[probe_index];
989 probe_index++;
990
991 dbg("%s: initialising port %p...\n", __func__, ourport);
992
993 ret = s3c24xx_serial_init_port(ourport, info, dev);
994 if (ret < 0)
995 goto probe_err;
996
997 dbg("%s: adding port\n", __func__);
998 uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
999 platform_set_drvdata(dev, &ourport->port);
1000
1001 ret = device_create_file(&dev->dev, &dev_attr_clock_source);
1002 if (ret < 0)
1003 printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
1004
1005 return 0;
1006
1007 probe_err:
1008 return ret;
1009}
1010
1011EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
1012
1013int s3c24xx_serial_remove(struct platform_device *dev)
1014{
1015 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
1016
1017 if (port) {
1018 device_remove_file(&dev->dev, &dev_attr_clock_source);
1019 uart_remove_one_port(&s3c24xx_uart_drv, port);
1020 }
1021
1022 return 0;
1023}
1024
1025EXPORT_SYMBOL_GPL(s3c24xx_serial_remove);
1026
1027/* UART power management code */
1028
1029#ifdef CONFIG_PM
1030
1031static int s3c24xx_serial_suspend(struct platform_device *dev, pm_message_t state)
1032{
1033 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
1034
1035 if (port)
1036 uart_suspend_port(&s3c24xx_uart_drv, port);
1037
1038 return 0;
1039}
1040
1041static int s3c24xx_serial_resume(struct platform_device *dev)
1042{
1043 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
1044 struct s3c24xx_uart_port *ourport = to_ourport(port);
1045
1046 if (port) {
1047 clk_enable(ourport->clk);
1048 s3c24xx_serial_resetport(port, s3c24xx_port_to_cfg(port));
1049 clk_disable(ourport->clk);
1050
1051 uart_resume_port(&s3c24xx_uart_drv, port);
1052 }
1053
1054 return 0;
1055}
1056#endif
1057
1058int s3c24xx_serial_init(struct platform_driver *drv,
1059 struct s3c24xx_uart_info *info)
1060{
1061 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
1062
1063#ifdef CONFIG_PM
1064 drv->suspend = s3c24xx_serial_suspend;
1065 drv->resume = s3c24xx_serial_resume;
1066#endif
1067
1068 return platform_driver_register(drv);
1069}
1070
1071EXPORT_SYMBOL_GPL(s3c24xx_serial_init);
1072
1073/* module initialisation code */
1074
1075static int __init s3c24xx_serial_modinit(void)
1076{
1077 int ret;
1078
1079 ret = uart_register_driver(&s3c24xx_uart_drv);
1080 if (ret < 0) {
1081 printk(KERN_ERR "failed to register UART driver\n");
1082 return ret;
1083 }
1084
1085 return 0;
1086}
1087
1088static void __exit s3c24xx_serial_modexit(void)
1089{
1090 uart_unregister_driver(&s3c24xx_uart_drv);
1091}
1092
1093module_init(s3c24xx_serial_modinit);
1094module_exit(s3c24xx_serial_modexit);
1095
1096/* Console code */
1097
1098#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
1099
1100static struct uart_port *cons_uart;
1101
1102static int
1103s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon)
1104{
1105 struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
1106 unsigned long ufstat, utrstat;
1107
1108 if (ufcon & S3C2410_UFCON_FIFOMODE) {
1109 /* fifo mode - check amount of data in fifo registers... */
1110
1111 ufstat = rd_regl(port, S3C2410_UFSTAT);
1112 return (ufstat & info->tx_fifofull) ? 0 : 1;
1113 }
1114
1115 /* in non-fifo mode, use the tx buffer empty flag instead */
1116
1117 utrstat = rd_regl(port, S3C2410_UTRSTAT);
1118 return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0;
1119}
1120
1121static void
1122s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
1123{
1124 unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
1125 while (!s3c24xx_serial_console_txrdy(port, ufcon))
1126 barrier();
1127 wr_regb(cons_uart, S3C2410_UTXH, ch);
1128}
1129
1130static void
1131s3c24xx_serial_console_write(struct console *co, const char *s,
1132 unsigned int count)
1133{
1134 uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
1135}
1136
1137static void __init
1138s3c24xx_serial_get_options(struct uart_port *port, int *baud,
1139 int *parity, int *bits)
1140{
1141 struct s3c24xx_uart_clksrc clksrc;
1142 struct clk *clk;
1143 unsigned int ulcon;
1144 unsigned int ucon;
1145 unsigned int ubrdiv;
1146 unsigned long rate;
1147
1148 ulcon = rd_regl(port, S3C2410_ULCON);
1149 ucon = rd_regl(port, S3C2410_UCON);
1150 ubrdiv = rd_regl(port, S3C2410_UBRDIV);
1151
1152 dbg("s3c24xx_serial_get_options: port=%p\n"
1153 "registers: ulcon=%08x, ucon=%08x, ubrdiv=%08x\n",
1154 port, ulcon, ucon, ubrdiv);
1155
1156 if ((ucon & 0xf) != 0) {
1157 /* consider the serial port configured if the tx/rx mode set */
1158
1159 switch (ulcon & S3C2410_LCON_CSMASK) {
1160 case S3C2410_LCON_CS5:
1161 *bits = 5;
1162 break;
1163 case S3C2410_LCON_CS6:
1164 *bits = 6;
1165 break;
1166 case S3C2410_LCON_CS7:
1167 *bits = 7;
1168 break;
1169 default:
1170 case S3C2410_LCON_CS8:
1171 *bits = 8;
1172 break;
1173 }
1174
1175 switch (ulcon & S3C2410_LCON_PMASK) {
1176 case S3C2410_LCON_PEVEN:
1177 *parity = 'e';
1178 break;
1179
1180 case S3C2410_LCON_PODD:
1181 *parity = 'o';
1182 break;
1183
1184 case S3C2410_LCON_PNONE:
1185 default:
1186 *parity = 'n';
1187 }
1188
1189 /* now calculate the baud rate */
1190
1191 s3c24xx_serial_getsource(port, &clksrc);
1192
1193 clk = clk_get(port->dev, clksrc.name);
1194 if (!IS_ERR(clk) && clk != NULL)
1195 rate = clk_get_rate(clk) / clksrc.divisor;
1196 else
1197 rate = 1;
1198
1199
1200 *baud = rate / (16 * (ubrdiv + 1));
1201 dbg("calculated baud %d\n", *baud);
1202 }
1203
1204}
1205
1206/* s3c24xx_serial_init_ports
1207 *
1208 * initialise the serial ports from the machine provided initialisation
1209 * data.
1210*/
1211
1212static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info *info)
1213{
1214 struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports;
1215 struct platform_device **platdev_ptr;
1216 int i;
1217
1218 dbg("s3c24xx_serial_init_ports: initialising ports...\n");
1219
1220 platdev_ptr = s3c24xx_uart_devs;
1221
1222 for (i = 0; i < NR_PORTS; i++, ptr++, platdev_ptr++) {
1223 s3c24xx_serial_init_port(ptr, info, *platdev_ptr);
1224 }
1225
1226 return 0;
1227}
1228
1229static int __init
1230s3c24xx_serial_console_setup(struct console *co, char *options)
1231{
1232 struct uart_port *port;
1233 int baud = 9600;
1234 int bits = 8;
1235 int parity = 'n';
1236 int flow = 'n';
1237
1238 dbg("s3c24xx_serial_console_setup: co=%p (%d), %s\n",
1239 co, co->index, options);
1240
1241 /* is this a valid port */
1242
1243 if (co->index == -1 || co->index >= NR_PORTS)
1244 co->index = 0;
1245
1246 port = &s3c24xx_serial_ports[co->index].port;
1247
1248 /* is the port configured? */
1249
1250 if (port->mapbase == 0x0) {
1251 co->index = 0;
1252 port = &s3c24xx_serial_ports[co->index].port;
1253 }
1254
1255 cons_uart = port;
1256
1257 dbg("s3c24xx_serial_console_setup: port=%p (%d)\n", port, co->index);
1258
1259 /*
1260 * Parse the options given on the kernel command line; if none were
1261 * supplied, read the current settings back from the hardware so the
1262 * console keeps the configuration left by the bootloader.
1263 */
1264 if (options)
1265 uart_parse_options(options, &baud, &parity, &bits, &flow);
1266 else
1267 s3c24xx_serial_get_options(port, &baud, &parity, &bits);
1268
1269 dbg("s3c24xx_serial_console_setup: baud %d\n", baud);
1270
1271 return uart_set_options(port, co, baud, parity, bits, flow);
1272}
1273
1274/* s3c24xx_serial_initconsole
1275 *
1276 * initialise the console from one of the uart drivers
1277*/
1278
1279static struct console s3c24xx_serial_console = {
1280 .name = S3C24XX_SERIAL_NAME,
1281 .device = uart_console_device,
1282 .flags = CON_PRINTBUFFER,
1283 .index = -1,
1284 .write = s3c24xx_serial_console_write,
1285 .setup = s3c24xx_serial_console_setup
1286};
1287
1288int s3c24xx_serial_initconsole(struct platform_driver *drv,
1289 struct s3c24xx_uart_info *info)
1290
1291{
1292 struct platform_device *dev = s3c24xx_uart_devs[0];
1293
1294 dbg("s3c24xx_serial_initconsole\n");
1295
1296 /* select driver based on the cpu */
1297
1298 if (dev == NULL) {
1299 printk(KERN_ERR "s3c24xx: no devices for console init\n");
1300 return 0;
1301 }
1302
1303 if (strcmp(dev->name, drv->driver.name) != 0)
1304 return 0;
1305
1306 s3c24xx_serial_console.data = &s3c24xx_uart_drv;
1307 s3c24xx_serial_init_ports(info);
1308
1309 register_console(&s3c24xx_serial_console);
1310 return 0;
1311}
1312
1313#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
1314
1315MODULE_DESCRIPTION("Samsung SoC Serial port driver");
1316MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
1317MODULE_LICENSE("GPL v2");
diff --git a/drivers/serial/samsung.h b/drivers/serial/samsung.h
new file mode 100644
index 000000000000..5c92ebbe7d9e
--- /dev/null
+++ b/drivers/serial/samsung.h
@@ -0,0 +1,102 @@
1/* linux/drivers/serial/samsung.h
2 *
3 * Driver for Samsung SoC onboard UARTs.
4 *
5 * Ben Dooks, Copyright (c) 2003-2005,2008 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13struct s3c24xx_uart_info {
14 char *name;
15 unsigned int type;
16 unsigned int fifosize;
17 unsigned long rx_fifomask;
18 unsigned long rx_fifoshift;
19 unsigned long rx_fifofull;
20 unsigned long tx_fifomask;
21 unsigned long tx_fifoshift;
22 unsigned long tx_fifofull;
23
24 /* clock source control */
25
26 int (*get_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
27 int (*set_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
28
29 /* uart controls */
30 int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
31};
32
33struct s3c24xx_uart_port {
34 unsigned char rx_claimed;
35 unsigned char tx_claimed;
36
37 struct s3c24xx_uart_info *info;
38 struct s3c24xx_uart_clksrc *clksrc;
39 struct clk *clk;
40 struct clk *baudclk;
41 struct uart_port port;
42};
43
44/* conversion functions */
45
46#define s3c24xx_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
47#define s3c24xx_dev_to_cfg(__dev) (struct s3c2410_uartcfg *)((__dev)->platform_data)
48
49/* register access controls */
50
51#define portaddr(port, reg) ((port)->membase + (reg))
52
53#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
54#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
55
56#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
57#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
58
59extern int s3c24xx_serial_probe(struct platform_device *dev,
60 struct s3c24xx_uart_info *uart);
61
62extern int s3c24xx_serial_remove(struct platform_device *dev);
63
64extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
65 struct s3c24xx_uart_info *uart);
66
67extern int s3c24xx_serial_init(struct platform_driver *drv,
68 struct s3c24xx_uart_info *info);
69
70#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
71
72#define s3c24xx_console_init(__drv, __inf) \
73static int __init s3c_serial_console_init(void) \
74{ \
75 return s3c24xx_serial_initconsole(__drv, __inf); \
76} \
77 \
78console_initcall(s3c_serial_console_init)
79
80#else
81#define s3c24xx_console_init(drv, inf) extern void no_console(void)
82#endif
83
84#ifdef CONFIG_SERIAL_SAMSUNG_DEBUG
85
86extern void printascii(const char *);
87
88static void dbg(const char *fmt, ...)
89{
90 va_list va;
91 char buff[256];
92
93 va_start(va, fmt);
94 vsprintf(buff, fmt, va);
95 va_end(va);
96
97 printascii(buff);
98}
99
100#else
101#define dbg(x...) do { } while (0)
102#endif
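
A minimal sketch of a per-SoC front end built on the interface above (not part of this patch; the s3c2410 names and register values are illustrative): it fills in a struct s3c24xx_uart_info, forwards probe/remove to the shared helpers, registers through s3c24xx_serial_init(), and uses s3c24xx_console_init() for the early console hook.

/* Illustrative per-SoC front end (not part of this patch).  It also needs
 * the platform's regs-serial.h definitions in addition to "samsung.h".
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>

#include "samsung.h"

static int s3c2410_serial_resetport(struct uart_port *port,
				    struct s3c2410_uartcfg *cfg)
{
	/* a real reset hook reprograms ULCON/UCON/UFCON from the board cfg */
	wr_regl(port, S3C2410_UCON,  cfg->ucon);
	wr_regl(port, S3C2410_ULCON, cfg->ulcon);
	wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
	return 0;
}

static struct s3c24xx_uart_info s3c2410_uart_inf = {
	.name		= "Samsung S3C2410 UART",
	.type		= PORT_S3C2410,
	.fifosize	= 16,
	.reset_port	= s3c2410_serial_resetport,
	/* .get_clksrc / .set_clksrc would be filled in the same way */
};

static int s3c2410_serial_probe(struct platform_device *dev)
{
	return s3c24xx_serial_probe(dev, &s3c2410_uart_inf);
}

static struct platform_driver s3c2410_serial_drv = {
	.probe		= s3c2410_serial_probe,
	.remove		= s3c24xx_serial_remove,
	.driver		= {
		.name	= "s3c2410-uart",
		.owner	= THIS_MODULE,
	},
};

/* hooks the shared console code up at console_initcall() time */
s3c24xx_console_init(&s3c2410_serial_drv, &s3c2410_uart_inf);

static int __init s3c2410_serial_init(void)
{
	return s3c24xx_serial_init(&s3c2410_serial_drv, &s3c2410_uart_inf);
}
module_init(s3c2410_serial_init);
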
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index c730d05bfeb6..54ac7bea5f8c 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -29,6 +29,7 @@
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/workqueue.h> 30#include <linux/workqueue.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/clk.h>
32 33
33#include <asm/io.h> 34#include <asm/io.h>
34#include <asm/irq.h> 35#include <asm/irq.h>
@@ -250,6 +251,8 @@ struct driver_data {
250 int tx_dma_needs_unmap; 251 int tx_dma_needs_unmap;
251 size_t tx_map_len; 252 size_t tx_map_len;
252 u32 dummy_dma_buf ____cacheline_aligned; 253 u32 dummy_dma_buf ____cacheline_aligned;
254
255 struct clk *clk;
253}; 256};
254 257
255/* Runtime state */ 258/* Runtime state */
@@ -855,15 +858,15 @@ static irqreturn_t spi_int(int irq, void *dev_id)
855 return drv_data->transfer_handler(drv_data); 858 return drv_data->transfer_handler(drv_data);
856} 859}
857 860
858static inline u32 spi_speed_hz(u32 data_rate) 861static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate)
859{ 862{
860 return imx_get_perclk2() / (4 << ((data_rate) >> 13)); 863 return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13));
861} 864}
862 865
863static u32 spi_data_rate(u32 speed_hz) 866static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz)
864{ 867{
865 u32 div; 868 u32 div;
866 u32 quantized_hz = imx_get_perclk2() >> 2; 869 u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2;
867 870
868 for (div = SPI_PERCLK2_DIV_MIN; 871 for (div = SPI_PERCLK2_DIV_MIN;
869 div <= SPI_PERCLK2_DIV_MAX; 872 div <= SPI_PERCLK2_DIV_MAX;
@@ -947,7 +950,7 @@ static void pump_transfers(unsigned long data)
947 tmp = transfer->speed_hz; 950 tmp = transfer->speed_hz;
948 if (tmp == 0) 951 if (tmp == 0)
949 tmp = chip->max_speed_hz; 952 tmp = chip->max_speed_hz;
950 tmp = spi_data_rate(tmp); 953 tmp = spi_data_rate(drv_data, tmp);
951 u32_EDIT(control, SPI_CONTROL_DATARATE, tmp); 954 u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);
952 955
953 writel(control, regs + SPI_CONTROL); 956 writel(control, regs + SPI_CONTROL);
@@ -1109,7 +1112,7 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
1109 msg->actual_length = 0; 1112 msg->actual_length = 0;
1110 1113
1111 /* Per transfer setup check */ 1114 /* Per transfer setup check */
1112 min_speed_hz = spi_speed_hz(SPI_CONTROL_DATARATE_MIN); 1115 min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN);
1113 max_speed_hz = spi->max_speed_hz; 1116 max_speed_hz = spi->max_speed_hz;
1114 list_for_each_entry(trans, &msg->transfers, transfer_list) { 1117 list_for_each_entry(trans, &msg->transfers, transfer_list) {
1115 tmp = trans->bits_per_word; 1118 tmp = trans->bits_per_word;
@@ -1176,6 +1179,7 @@ msg_rejected:
1176 applied and notified to the calling driver. */ 1179 applied and notified to the calling driver. */
1177static int setup(struct spi_device *spi) 1180static int setup(struct spi_device *spi)
1178{ 1181{
1182 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1179 struct spi_imx_chip *chip_info; 1183 struct spi_imx_chip *chip_info;
1180 struct chip_data *chip; 1184 struct chip_data *chip;
1181 int first_setup = 0; 1185 int first_setup = 0;
@@ -1304,14 +1308,14 @@ static int setup(struct spi_device *spi)
1304 chip->n_bytes = (tmp <= 8) ? 1 : 2; 1308 chip->n_bytes = (tmp <= 8) ? 1 : 2;
1305 1309
1306 /* SPI datarate */ 1310 /* SPI datarate */
1307 tmp = spi_data_rate(spi->max_speed_hz); 1311 tmp = spi_data_rate(drv_data, spi->max_speed_hz);
1308 if (tmp == SPI_CONTROL_DATARATE_BAD) { 1312 if (tmp == SPI_CONTROL_DATARATE_BAD) {
1309 status = -EINVAL; 1313 status = -EINVAL;
1310 dev_err(&spi->dev, 1314 dev_err(&spi->dev,
1311 "setup - " 1315 "setup - "
1312 "HW min speed (%d Hz) exceeds required " 1316 "HW min speed (%d Hz) exceeds required "
1313 "max speed (%d Hz)\n", 1317 "max speed (%d Hz)\n",
1314 spi_speed_hz(SPI_CONTROL_DATARATE_MIN), 1318 spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
1315 spi->max_speed_hz); 1319 spi->max_speed_hz);
1316 if (first_setup) 1320 if (first_setup)
1317 goto err_first_setup; 1321 goto err_first_setup;
@@ -1321,7 +1325,7 @@ static int setup(struct spi_device *spi)
1321 } else { 1325 } else {
1322 u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp); 1326 u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
1323 /* Actual rounded max_speed_hz */ 1327 /* Actual rounded max_speed_hz */
1324 tmp = spi_speed_hz(tmp); 1328 tmp = spi_speed_hz(drv_data, tmp);
1325 spi->max_speed_hz = tmp; 1329 spi->max_speed_hz = tmp;
1326 chip->max_speed_hz = tmp; 1330 chip->max_speed_hz = tmp;
1327 } 1331 }
@@ -1352,7 +1356,7 @@ static int setup(struct spi_device *spi)
1352 chip->period & SPI_PERIOD_WAIT, 1356 chip->period & SPI_PERIOD_WAIT,
1353 spi->mode, 1357 spi->mode,
1354 spi->bits_per_word, 1358 spi->bits_per_word,
1355 spi_speed_hz(SPI_CONTROL_DATARATE_MIN), 1359 spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
1356 spi->max_speed_hz); 1360 spi->max_speed_hz);
1357 return status; 1361 return status;
1358 1362
@@ -1465,6 +1469,14 @@ static int __init spi_imx_probe(struct platform_device *pdev)
1465 goto err_no_pdata; 1469 goto err_no_pdata;
1466 } 1470 }
1467 1471
1472 drv_data->clk = clk_get(&pdev->dev, "perclk2");
1473 if (IS_ERR(drv_data->clk)) {
1474 dev_err(&pdev->dev, "probe - cannot get clock\n");
1475 status = PTR_ERR(drv_data->clk);
1476 goto err_no_clk;
1477 }
1478 clk_enable(drv_data->clk);
1479
1468 /* Allocate master with space for drv_data */ 1480 /* Allocate master with space for drv_data */
1469 master = spi_alloc_master(dev, sizeof(struct driver_data)); 1481 master = spi_alloc_master(dev, sizeof(struct driver_data));
1470 if (!master) { 1482 if (!master) {
@@ -1526,24 +1538,24 @@ static int __init spi_imx_probe(struct platform_device *pdev)
1526 drv_data->rx_channel = -1; 1538 drv_data->rx_channel = -1;
1527 if (platform_info->enable_dma) { 1539 if (platform_info->enable_dma) {
1528 /* Get rx DMA channel */ 1540 /* Get rx DMA channel */
1529 status = imx_dma_request_by_prio(&drv_data->rx_channel, 1541 drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx",
1530 "spi_imx_rx", DMA_PRIO_HIGH); 1542 DMA_PRIO_HIGH);
1531 if (status < 0) { 1543 if (drv_data->rx_channel < 0) {
1532 dev_err(dev, 1544 dev_err(dev,
1533 "probe - problem (%d) requesting rx channel\n", 1545 "probe - problem (%d) requesting rx channel\n",
1534 status); 1546 drv_data->rx_channel);
1535 goto err_no_rxdma; 1547 goto err_no_rxdma;
1536 } else 1548 } else
1537 imx_dma_setup_handlers(drv_data->rx_channel, NULL, 1549 imx_dma_setup_handlers(drv_data->rx_channel, NULL,
1538 dma_err_handler, drv_data); 1550 dma_err_handler, drv_data);
1539 1551
1540 /* Get tx DMA channel */ 1552 /* Get tx DMA channel */
1541 status = imx_dma_request_by_prio(&drv_data->tx_channel, 1553 drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx",
1542 "spi_imx_tx", DMA_PRIO_MEDIUM); 1554 DMA_PRIO_MEDIUM);
1543 if (status < 0) { 1555 if (drv_data->tx_channel < 0) {
1544 dev_err(dev, 1556 dev_err(dev,
1545 "probe - problem (%d) requesting tx channel\n", 1557 "probe - problem (%d) requesting tx channel\n",
1546 status); 1558 drv_data->tx_channel);
1547 imx_dma_free(drv_data->rx_channel); 1559 imx_dma_free(drv_data->rx_channel);
1548 goto err_no_txdma; 1560 goto err_no_txdma;
1549 } else 1561 } else
@@ -1623,6 +1635,9 @@ err_no_iores:
1623 spi_master_put(master); 1635 spi_master_put(master);
1624 1636
1625err_no_pdata: 1637err_no_pdata:
1638 clk_disable(drv_data->clk);
1639 clk_put(drv_data->clk);
1640err_no_clk:
1626err_no_mem: 1641err_no_mem:
1627 return status; 1642 return status;
1628} 1643}
@@ -1662,6 +1677,9 @@ static int __exit spi_imx_remove(struct platform_device *pdev)
1662 if (irq >= 0) 1677 if (irq >= 0)
1663 free_irq(irq, drv_data); 1678 free_irq(irq, drv_data);
1664 1679
1680 clk_disable(drv_data->clk);
1681 clk_put(drv_data->clk);
1682
1665 /* Release map resources */ 1683 /* Release map resources */
1666 iounmap(drv_data->regs); 1684 iounmap(drv_data->regs);
1667 release_resource(drv_data->ioarea); 1685 release_resource(drv_data->ioarea);
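
The spi_imx hunks above swap the SoC-private imx_get_perclk2() call for the generic clk API: the clock is looked up once at probe time, its rate feeds the divider calculations, and the reference is dropped on the error path and in remove. A minimal sketch of that get/enable/get_rate/disable/put pattern, assuming a platform device and the "perclk2" name used here (illustrative helper, not from this patch):

/* Sketch of the clk API usage pattern adopted above (illustrative only). */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_clk_setup(struct platform_device *pdev)
{
	struct clk *clk;
	unsigned long rate;

	clk = clk_get(&pdev->dev, "perclk2");	/* look up by device + name */
	if (IS_ERR(clk))
		return PTR_ERR(clk);		/* e.g. -ENOENT if missing */

	clk_enable(clk);			/* gate the clock on */
	rate = clk_get_rate(clk);		/* rate in Hz, used for dividers */
	dev_info(&pdev->dev, "perclk2 runs at %lu Hz\n", rate);

	clk_disable(clk);			/* balance the enable ... */
	clk_put(clk);				/* ... and drop the reference */
	return 0;
}

In the driver itself the reference is kept for the device's lifetime, which is why the error path and spi_imx_remove() both grow the clk_disable()/clk_put() pair.
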
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index f5b60c70389b..ddbe1a5e970e 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -30,6 +30,7 @@
30#include <linux/errno.h> 30#include <linux/errno.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/smp_lock.h>
33 34
34#include <linux/spi/spi.h> 35#include <linux/spi/spi.h>
35#include <linux/spi/spidev.h> 36#include <linux/spi/spidev.h>
@@ -464,6 +465,7 @@ static int spidev_open(struct inode *inode, struct file *filp)
464 struct spidev_data *spidev; 465 struct spidev_data *spidev;
465 int status = -ENXIO; 466 int status = -ENXIO;
466 467
468 lock_kernel();
467 mutex_lock(&device_list_lock); 469 mutex_lock(&device_list_lock);
468 470
469 list_for_each_entry(spidev, &device_list, device_entry) { 471 list_for_each_entry(spidev, &device_list, device_entry) {
@@ -489,6 +491,7 @@ static int spidev_open(struct inode *inode, struct file *filp)
489 pr_debug("spidev: nothing for minor %d\n", iminor(inode)); 491 pr_debug("spidev: nothing for minor %d\n", iminor(inode));
490 492
491 mutex_unlock(&device_list_lock); 493 mutex_unlock(&device_list_lock);
494 unlock_kernel();
492 return status; 495 return status;
493} 496}
494 497
diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c
index bcea8d9b718c..4d74ba36c3a1 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/telephony/phonedev.c
@@ -23,6 +23,7 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/phonedev.h> 24#include <linux/phonedev.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/smp_lock.h>
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27#include <asm/system.h> 28#include <asm/system.h>
28 29
@@ -53,6 +54,7 @@ static int phone_open(struct inode *inode, struct file *file)
53 if (minor >= PHONE_NUM_DEVICES) 54 if (minor >= PHONE_NUM_DEVICES)
54 return -ENODEV; 55 return -ENODEV;
55 56
57 lock_kernel();
56 mutex_lock(&phone_lock); 58 mutex_lock(&phone_lock);
57 p = phone_device[minor]; 59 p = phone_device[minor];
58 if (p) 60 if (p)
@@ -79,6 +81,7 @@ static int phone_open(struct inode *inode, struct file *file)
79 fops_put(old_fops); 81 fops_put(old_fops);
80end: 82end:
81 mutex_unlock(&phone_lock); 83 mutex_unlock(&phone_lock);
84 unlock_kernel();
82 return err; 85 return err;
83} 86}
84 87
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 0a12e90ad416..5a7ca2e6094d 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -297,12 +297,17 @@ static int uio_open(struct inode *inode, struct file *filep)
297 struct uio_listener *listener; 297 struct uio_listener *listener;
298 int ret = 0; 298 int ret = 0;
299 299
300 lock_kernel();
300 idev = idr_find(&uio_idr, iminor(inode)); 301 idev = idr_find(&uio_idr, iminor(inode));
301 if (!idev) 302 if (!idev) {
302 return -ENODEV; 303 ret = -ENODEV;
304 goto out;
305 }
303 306
304 if (!try_module_get(idev->owner)) 307 if (!try_module_get(idev->owner)) {
305 return -ENODEV; 308 ret = -ENODEV;
309 goto out;
310 }
306 311
307 listener = kmalloc(sizeof(*listener), GFP_KERNEL); 312 listener = kmalloc(sizeof(*listener), GFP_KERNEL);
308 if (!listener) { 313 if (!listener) {
@@ -319,7 +324,7 @@ static int uio_open(struct inode *inode, struct file *filep)
319 if (ret) 324 if (ret)
320 goto err_infoopen; 325 goto err_infoopen;
321 } 326 }
322 327 unlock_kernel();
323 return 0; 328 return 0;
324 329
325err_infoopen: 330err_infoopen:
@@ -329,6 +334,8 @@ err_alloc_listener:
329 334
330 module_put(idev->owner); 335 module_put(idev->owner);
331 336
337out:
338 unlock_kernel();
332 return ret; 339 return ret;
333} 340}
334 341
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index de17738f3acb..9218cca21043 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -565,6 +565,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
565 struct dev_state *ps; 565 struct dev_state *ps;
566 int ret; 566 int ret;
567 567
568 lock_kernel();
568 /* Protect against simultaneous removal or release */ 569 /* Protect against simultaneous removal or release */
569 mutex_lock(&usbfs_mutex); 570 mutex_lock(&usbfs_mutex);
570 571
@@ -611,6 +612,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
611 if (ret) 612 if (ret)
612 kfree(ps); 613 kfree(ps);
613 mutex_unlock(&usbfs_mutex); 614 mutex_unlock(&usbfs_mutex);
615 unlock_kernel();
614 return ret; 616 return ret;
615} 617}
616 618
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 8133c99c6c5c..c6a95395e52a 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/rwsem.h> 20#include <linux/rwsem.h>
21#include <linux/smp_lock.h>
21#include <linux/usb.h> 22#include <linux/usb.h>
22 23
23#include "usb.h" 24#include "usb.h"
@@ -33,6 +34,7 @@ static int usb_open(struct inode * inode, struct file * file)
33 int err = -ENODEV; 34 int err = -ENODEV;
34 const struct file_operations *old_fops, *new_fops = NULL; 35 const struct file_operations *old_fops, *new_fops = NULL;
35 36
37 lock_kernel();
36 down_read(&minor_rwsem); 38 down_read(&minor_rwsem);
37 c = usb_minors[minor]; 39 c = usb_minors[minor];
38 40
@@ -51,6 +53,7 @@ static int usb_open(struct inode * inode, struct file * file)
51 fops_put(old_fops); 53 fops_put(old_fops);
52 done: 54 done:
53 up_read(&minor_rwsem); 55 up_read(&minor_rwsem);
56 unlock_kernel();
54 return err; 57 return err;
55} 58}
56 59
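
The spidev, phonedev, uio and USB core hunks above are all part of the same BKL pushdown: the VFS no longer takes the big kernel lock around chardev open(), so each open handler now brackets its own body with lock_kernel()/unlock_kernel(). A minimal sketch of the pattern (illustrative handler, not from this patch):

#include <linux/fs.h>
#include <linux/smp_lock.h>

/* Illustrative open handler showing the explicit BKL bracketing used above. */
static int example_open(struct inode *inode, struct file *file)
{
	int ret = -ENODEV;

	lock_kernel();		/* previously taken implicitly by the VFS */

	/* ... look up per-device state by iminor(inode),
	 *     set file->private_data, adjust ret ...
	 */
	ret = 0;

	unlock_kernel();	/* every return path must drop the BKL */
	return ret;
}
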
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 6e784d2db423..d6bab0d5f453 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -118,10 +118,10 @@ config USB_AMD5536UDC
118config USB_GADGET_ATMEL_USBA 118config USB_GADGET_ATMEL_USBA
119 boolean "Atmel USBA" 119 boolean "Atmel USBA"
120 select USB_GADGET_DUALSPEED 120 select USB_GADGET_DUALSPEED
121 depends on AVR32 || ARCH_AT91CAP9 121 depends on AVR32 || ARCH_AT91CAP9 || ARCH_AT91SAM9RL
122 help 122 help
123 USBA is the integrated high-speed USB Device controller on 123 USBA is the integrated high-speed USB Device controller on
124 the AT32AP700x and AT91CAP9 processors from Atmel. 124 the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
125 125
126config USB_ATMEL_USBA 126config USB_ATMEL_USBA
127 tristate 127 tristate
@@ -172,7 +172,7 @@ config USB_NET2280
172 default USB_GADGET 172 default USB_GADGET
173 select USB_GADGET_SELECTED 173 select USB_GADGET_SELECTED
174 174
175config USB_GADGET_PXA2XX 175config USB_GADGET_PXA25X
176 boolean "PXA 25x or IXP 4xx" 176 boolean "PXA 25x or IXP 4xx"
177 depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX 177 depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
178 help 178 help
@@ -184,19 +184,19 @@ config USB_GADGET_PXA2XX
184 zero (for control transfers). 184 zero (for control transfers).
185 185
186 Say "y" to link the driver statically, or "m" to build a 186 Say "y" to link the driver statically, or "m" to build a
187 dynamically linked module called "pxa2xx_udc" and force all 187 dynamically linked module called "pxa25x_udc" and force all
188 gadget drivers to also be dynamically linked. 188 gadget drivers to also be dynamically linked.
189 189
190config USB_PXA2XX 190config USB_PXA25X
191 tristate 191 tristate
192 depends on USB_GADGET_PXA2XX 192 depends on USB_GADGET_PXA25X
193 default USB_GADGET 193 default USB_GADGET
194 select USB_GADGET_SELECTED 194 select USB_GADGET_SELECTED
195 195
196# if there's only one gadget driver, using only two bulk endpoints, 196# if there's only one gadget driver, using only two bulk endpoints,
197# don't waste memory for the other endpoints 197# don't waste memory for the other endpoints
198config USB_PXA2XX_SMALL 198config USB_PXA25X_SMALL
199 depends on USB_GADGET_PXA2XX 199 depends on USB_GADGET_PXA25X
200 bool 200 bool
201 default n if USB_ETH_RNDIS 201 default n if USB_ETH_RNDIS
202 default y if USB_ZERO 202 default y if USB_ZERO
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 12357255d740..e258afd25faf 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -8,7 +8,7 @@ endif
8obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o 8obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
9obj-$(CONFIG_USB_NET2280) += net2280.o 9obj-$(CONFIG_USB_NET2280) += net2280.o
10obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o 10obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o
11obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o 11obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o
12obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o 12obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
13obj-$(CONFIG_USB_GOKU) += goku_udc.o 13obj-$(CONFIG_USB_GOKU) += goku_udc.o
14obj-$(CONFIG_USB_OMAP) += omap_udc.o 14obj-$(CONFIG_USB_OMAP) += omap_udc.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 274c60a970cd..b6b2a0a5ba37 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -888,7 +888,7 @@ static void pullup(struct at91_udc *udc, int is_on)
888 at91_udp_write(udc, AT91_UDP_TXVC, 0); 888 at91_udp_write(udc, AT91_UDP_TXVC, 0);
889 if (cpu_is_at91rm9200()) 889 if (cpu_is_at91rm9200())
890 gpio_set_value(udc->board.pullup_pin, active); 890 gpio_set_value(udc->board.pullup_pin, active);
891 else if (cpu_is_at91sam9260() || cpu_is_at91sam9263()) { 891 else if (cpu_is_at91sam9260() || cpu_is_at91sam9263() || cpu_is_at91sam9g20()) {
892 u32 txvc = at91_udp_read(udc, AT91_UDP_TXVC); 892 u32 txvc = at91_udp_read(udc, AT91_UDP_TXVC);
893 893
894 txvc |= AT91_UDP_TXVC_PUON; 894 txvc |= AT91_UDP_TXVC_PUON;
@@ -906,7 +906,7 @@ static void pullup(struct at91_udc *udc, int is_on)
906 at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); 906 at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
907 if (cpu_is_at91rm9200()) 907 if (cpu_is_at91rm9200())
908 gpio_set_value(udc->board.pullup_pin, !active); 908 gpio_set_value(udc->board.pullup_pin, !active);
909 else if (cpu_is_at91sam9260() || cpu_is_at91sam9263()) { 909 else if (cpu_is_at91sam9260() || cpu_is_at91sam9263() || cpu_is_at91sam9g20()) {
910 u32 txvc = at91_udp_read(udc, AT91_UDP_TXVC); 910 u32 txvc = at91_udp_read(udc, AT91_UDP_TXVC);
911 911
912 txvc &= ~AT91_UDP_TXVC_PUON; 912 txvc &= ~AT91_UDP_TXVC_PUON;
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 8d61ea67a817..4ce3950b997f 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -262,7 +262,7 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
262/* For CDC-incapable hardware, choose the simple cdc subset. 262/* For CDC-incapable hardware, choose the simple cdc subset.
263 * Anything that talks bulk (without notable bugs) can do this. 263 * Anything that talks bulk (without notable bugs) can do this.
264 */ 264 */
265#ifdef CONFIG_USB_GADGET_PXA2XX 265#ifdef CONFIG_USB_GADGET_PXA25X
266#define DEV_CONFIG_SUBSET 266#define DEV_CONFIG_SUBSET
267#endif 267#endif
268 268
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index f7f159c1002b..ca5149ea7312 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -29,8 +29,8 @@
29#define gadget_is_dummy(g) 0 29#define gadget_is_dummy(g) 0
30#endif 30#endif
31 31
32#ifdef CONFIG_USB_GADGET_PXA2XX 32#ifdef CONFIG_USB_GADGET_PXA25X
33#define gadget_is_pxa(g) !strcmp("pxa2xx_udc", (g)->name) 33#define gadget_is_pxa(g) !strcmp("pxa25x_udc", (g)->name)
34#else 34#else
35#define gadget_is_pxa(g) 0 35#define gadget_is_pxa(g) 0
36#endif 36#endif
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 69b0a2754f2a..f132a9219e11 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1501,7 +1501,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1501 } 1501 }
1502 break; 1502 break;
1503 1503
1504#ifndef CONFIG_USB_GADGET_PXA2XX 1504#ifndef CONFIG_USB_GADGET_PXA25X
1505 /* PXA automagically handles this request too */ 1505 /* PXA automagically handles this request too */
1506 case USB_REQ_GET_CONFIGURATION: 1506 case USB_REQ_GET_CONFIGURATION:
1507 if (ctrl->bRequestType != 0x80) 1507 if (ctrl->bRequestType != 0x80)
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 881d74c3d964..03a7f49d207d 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -135,13 +135,17 @@ static void use_ep(struct omap_ep *ep, u16 select)
135 135
136 if (ep->bEndpointAddress & USB_DIR_IN) 136 if (ep->bEndpointAddress & USB_DIR_IN)
137 num |= UDC_EP_DIR; 137 num |= UDC_EP_DIR;
138 UDC_EP_NUM_REG = num | select; 138 omap_writew(num | select, UDC_EP_NUM);
139 /* when select, MUST deselect later !! */ 139 /* when select, MUST deselect later !! */
140} 140}
141 141
142static inline void deselect_ep(void) 142static inline void deselect_ep(void)
143{ 143{
144 UDC_EP_NUM_REG &= ~UDC_EP_SEL; 144 u16 w;
145
146 w = omap_readw(UDC_EP_NUM);
147 w &= ~UDC_EP_SEL;
148 omap_writew(w, UDC_EP_NUM);
145 /* 6 wait states before TX will happen */ 149 /* 6 wait states before TX will happen */
146} 150}
147 151
@@ -216,7 +220,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
216 ep->has_dma = 0; 220 ep->has_dma = 0;
217 ep->lch = -1; 221 ep->lch = -1;
218 use_ep(ep, UDC_EP_SEL); 222 use_ep(ep, UDC_EP_SEL);
219 UDC_CTRL_REG = udc->clr_halt; 223 omap_writew(udc->clr_halt, UDC_CTRL);
220 ep->ackwait = 0; 224 ep->ackwait = 0;
221 deselect_ep(); 225 deselect_ep();
222 226
@@ -232,7 +236,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
232 if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC 236 if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC
233 && !ep->has_dma 237 && !ep->has_dma
234 && !(ep->bEndpointAddress & USB_DIR_IN)) { 238 && !(ep->bEndpointAddress & USB_DIR_IN)) {
235 UDC_CTRL_REG = UDC_SET_FIFO_EN; 239 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
236 ep->ackwait = 1 + ep->double_buf; 240 ep->ackwait = 1 + ep->double_buf;
237 } 241 }
238 242
@@ -259,7 +263,7 @@ static int omap_ep_disable(struct usb_ep *_ep)
259 nuke (ep, -ESHUTDOWN); 263 nuke (ep, -ESHUTDOWN);
260 ep->ep.maxpacket = ep->maxpacket; 264 ep->ep.maxpacket = ep->maxpacket;
261 ep->has_dma = 0; 265 ep->has_dma = 0;
262 UDC_CTRL_REG = UDC_SET_HALT; 266 omap_writew(UDC_SET_HALT, UDC_CTRL);
263 list_del_init(&ep->iso); 267 list_del_init(&ep->iso);
264 del_timer(&ep->timer); 268 del_timer(&ep->timer);
265 269
@@ -360,13 +364,13 @@ write_packet(u8 *buf, struct omap_req *req, unsigned max)
360 if (likely((((int)buf) & 1) == 0)) { 364 if (likely((((int)buf) & 1) == 0)) {
361 wp = (u16 *)buf; 365 wp = (u16 *)buf;
362 while (max >= 2) { 366 while (max >= 2) {
363 UDC_DATA_REG = *wp++; 367 omap_writew(*wp++, UDC_DATA);
364 max -= 2; 368 max -= 2;
365 } 369 }
366 buf = (u8 *)wp; 370 buf = (u8 *)wp;
367 } 371 }
368 while (max--) 372 while (max--)
369 *(volatile u8 *)&UDC_DATA_REG = *buf++; 373 omap_writeb(*buf++, UDC_DATA);
370 return len; 374 return len;
371} 375}
372 376
@@ -385,13 +389,13 @@ static int write_fifo(struct omap_ep *ep, struct omap_req *req)
385 prefetch(buf); 389 prefetch(buf);
386 390
387 /* PIO-IN isn't double buffered except for iso */ 391 /* PIO-IN isn't double buffered except for iso */
388 ep_stat = UDC_STAT_FLG_REG; 392 ep_stat = omap_readw(UDC_STAT_FLG);
389 if (ep_stat & UDC_FIFO_UNWRITABLE) 393 if (ep_stat & UDC_FIFO_UNWRITABLE)
390 return 0; 394 return 0;
391 395
392 count = ep->ep.maxpacket; 396 count = ep->ep.maxpacket;
393 count = write_packet(buf, req, count); 397 count = write_packet(buf, req, count);
394 UDC_CTRL_REG = UDC_SET_FIFO_EN; 398 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
395 ep->ackwait = 1; 399 ep->ackwait = 1;
396 400
397 /* last packet is often short (sometimes a zlp) */ 401 /* last packet is often short (sometimes a zlp) */
@@ -425,13 +429,13 @@ read_packet(u8 *buf, struct omap_req *req, unsigned avail)
425 if (likely((((int)buf) & 1) == 0)) { 429 if (likely((((int)buf) & 1) == 0)) {
426 wp = (u16 *)buf; 430 wp = (u16 *)buf;
427 while (avail >= 2) { 431 while (avail >= 2) {
428 *wp++ = UDC_DATA_REG; 432 *wp++ = omap_readw(UDC_DATA);
429 avail -= 2; 433 avail -= 2;
430 } 434 }
431 buf = (u8 *)wp; 435 buf = (u8 *)wp;
432 } 436 }
433 while (avail--) 437 while (avail--)
434 *buf++ = *(volatile u8 *)&UDC_DATA_REG; 438 *buf++ = omap_readb(UDC_DATA);
435 return len; 439 return len;
436} 440}
437 441
@@ -446,7 +450,7 @@ static int read_fifo(struct omap_ep *ep, struct omap_req *req)
446 prefetchw(buf); 450 prefetchw(buf);
447 451
448 for (;;) { 452 for (;;) {
449 u16 ep_stat = UDC_STAT_FLG_REG; 453 u16 ep_stat = omap_readw(UDC_STAT_FLG);
450 454
451 is_last = 0; 455 is_last = 0;
452 if (ep_stat & FIFO_EMPTY) { 456 if (ep_stat & FIFO_EMPTY) {
@@ -460,7 +464,7 @@ static int read_fifo(struct omap_ep *ep, struct omap_req *req)
460 if (ep_stat & UDC_FIFO_FULL) 464 if (ep_stat & UDC_FIFO_FULL)
461 avail = ep->ep.maxpacket; 465 avail = ep->ep.maxpacket;
462 else { 466 else {
463 avail = UDC_RXFSTAT_REG; 467 avail = omap_readw(UDC_RXFSTAT);
464 ep->fnf = ep->double_buf; 468 ep->fnf = ep->double_buf;
465 } 469 }
466 count = read_packet(buf, req, avail); 470 count = read_packet(buf, req, avail);
@@ -473,7 +477,7 @@ static int read_fifo(struct omap_ep *ep, struct omap_req *req)
473 req->req.status = -EOVERFLOW; 477 req->req.status = -EOVERFLOW;
474 avail -= count; 478 avail -= count;
475 while (avail--) 479 while (avail--)
476 (void) *(volatile u8 *)&UDC_DATA_REG; 480 omap_readw(UDC_DATA);
477 } 481 }
478 } else if (req->req.length == req->req.actual) 482 } else if (req->req.length == req->req.actual)
479 is_last = 1; 483 is_last = 1;
@@ -491,32 +495,6 @@ static int read_fifo(struct omap_ep *ep, struct omap_req *req)
491 495
492/*-------------------------------------------------------------------------*/ 496/*-------------------------------------------------------------------------*/
493 497
494static inline dma_addr_t dma_csac(unsigned lch)
495{
496 dma_addr_t csac;
497
498 /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
499 * read before the DMA controller finished disabling the channel.
500 */
501 csac = OMAP_DMA_CSAC_REG(lch);
502 if (csac == 0)
503 csac = OMAP_DMA_CSAC_REG(lch);
504 return csac;
505}
506
507static inline dma_addr_t dma_cdac(unsigned lch)
508{
509 dma_addr_t cdac;
510
511 /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
512 * read before the DMA controller finished disabling the channel.
513 */
514 cdac = OMAP_DMA_CDAC_REG(lch);
515 if (cdac == 0)
516 cdac = OMAP_DMA_CDAC_REG(lch);
517 return cdac;
518}
519
520static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start) 498static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
521{ 499{
522 dma_addr_t end; 500 dma_addr_t end;
@@ -527,7 +505,7 @@ static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
527 if (cpu_is_omap15xx()) 505 if (cpu_is_omap15xx())
528 return 0; 506 return 0;
529 507
530 end = dma_csac(ep->lch); 508 end = omap_get_dma_src_pos(ep->lch);
531 if (end == ep->dma_counter) 509 if (end == ep->dma_counter)
532 return 0; 510 return 0;
533 511
@@ -537,15 +515,11 @@ static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
537 return end - start; 515 return end - start;
538} 516}
539 517
540#define DMA_DEST_LAST(x) (cpu_is_omap15xx() \
541 ? OMAP_DMA_CSAC_REG(x) /* really: CPC */ \
542 : dma_cdac(x))
543
544static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start) 518static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
545{ 519{
546 dma_addr_t end; 520 dma_addr_t end;
547 521
548 end = DMA_DEST_LAST(ep->lch); 522 end = omap_get_dma_dst_pos(ep->lch);
549 if (end == ep->dma_counter) 523 if (end == ep->dma_counter)
550 return 0; 524 return 0;
551 525
@@ -565,7 +539,7 @@ static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
565 539
566static void next_in_dma(struct omap_ep *ep, struct omap_req *req) 540static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
567{ 541{
568 u16 txdma_ctrl; 542 u16 txdma_ctrl, w;
569 unsigned length = req->req.length - req->req.actual; 543 unsigned length = req->req.length - req->req.actual;
570 const int sync_mode = cpu_is_omap15xx() 544 const int sync_mode = cpu_is_omap15xx()
571 ? OMAP_DMA_SYNC_FRAME 545 ? OMAP_DMA_SYNC_FRAME
@@ -596,14 +570,18 @@ static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
596 0, 0); 570 0, 0);
597 571
598 omap_start_dma(ep->lch); 572 omap_start_dma(ep->lch);
599 ep->dma_counter = dma_csac(ep->lch); 573 ep->dma_counter = omap_get_dma_src_pos(ep->lch);
600 UDC_DMA_IRQ_EN_REG |= UDC_TX_DONE_IE(ep->dma_channel); 574 w = omap_readw(UDC_DMA_IRQ_EN);
601 UDC_TXDMA_REG(ep->dma_channel) = UDC_TXN_START | txdma_ctrl; 575 w |= UDC_TX_DONE_IE(ep->dma_channel);
576 omap_writew(w, UDC_DMA_IRQ_EN);
577 omap_writew(UDC_TXN_START | txdma_ctrl, UDC_TXDMA(ep->dma_channel));
602 req->dma_bytes = length; 578 req->dma_bytes = length;
603} 579}
604 580
605static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status) 581static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
606{ 582{
583 u16 w;
584
607 if (status == 0) { 585 if (status == 0) {
608 req->req.actual += req->dma_bytes; 586 req->req.actual += req->dma_bytes;
609 587
@@ -620,7 +598,9 @@ static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
620 598
621 /* tx completion */ 599 /* tx completion */
622 omap_stop_dma(ep->lch); 600 omap_stop_dma(ep->lch);
623 UDC_DMA_IRQ_EN_REG &= ~UDC_TX_DONE_IE(ep->dma_channel); 601 w = omap_readw(UDC_DMA_IRQ_EN);
602 w &= ~UDC_TX_DONE_IE(ep->dma_channel);
603 omap_writew(w, UDC_DMA_IRQ_EN);
624 done(ep, req, status); 604 done(ep, req, status);
625} 605}
626 606
@@ -628,6 +608,7 @@ static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
628{ 608{
629 unsigned packets = req->req.length - req->req.actual; 609 unsigned packets = req->req.length - req->req.actual;
630 int dma_trigger = 0; 610 int dma_trigger = 0;
611 u16 w;
631 612
632 if (cpu_is_omap24xx()) 613 if (cpu_is_omap24xx())
633 dma_trigger = OMAP24XX_DMA(USB_W2FC_RX0, ep->dma_channel); 614 dma_trigger = OMAP24XX_DMA(USB_W2FC_RX0, ep->dma_channel);
@@ -654,12 +635,14 @@ static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
654 omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF, 635 omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
655 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual, 636 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
656 0, 0); 637 0, 0);
657 ep->dma_counter = DMA_DEST_LAST(ep->lch); 638 ep->dma_counter = omap_get_dma_dst_pos(ep->lch);
658 639
659 UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1); 640 omap_writew(UDC_RXN_STOP | (packets - 1), UDC_RXDMA(ep->dma_channel));
660 UDC_DMA_IRQ_EN_REG |= UDC_RX_EOT_IE(ep->dma_channel); 641 w = omap_readw(UDC_DMA_IRQ_EN);
661 UDC_EP_NUM_REG = (ep->bEndpointAddress & 0xf); 642 w |= UDC_RX_EOT_IE(ep->dma_channel);
662 UDC_CTRL_REG = UDC_SET_FIFO_EN; 643 omap_writew(w, UDC_DMA_IRQ_EN);
644 omap_writew(ep->bEndpointAddress & 0xf, UDC_EP_NUM);
645 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
663 646
664 omap_start_dma(ep->lch); 647 omap_start_dma(ep->lch);
665} 648}
@@ -667,7 +650,7 @@ static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
667static void 650static void
668finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one) 651finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
669{ 652{
670 u16 count; 653 u16 count, w;
671 654
672 if (status == 0) 655 if (status == 0)
673 ep->dma_counter = (u16) (req->req.dma + req->req.actual); 656 ep->dma_counter = (u16) (req->req.dma + req->req.actual);
@@ -686,13 +669,15 @@ finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
686 return; 669 return;
687 670
688 /* rx completion */ 671 /* rx completion */
689 UDC_DMA_IRQ_EN_REG &= ~UDC_RX_EOT_IE(ep->dma_channel); 672 w = omap_readw(UDC_DMA_IRQ_EN);
673 w &= ~UDC_RX_EOT_IE(ep->dma_channel);
674 omap_writew(w, UDC_DMA_IRQ_EN);
690 done(ep, req, status); 675 done(ep, req, status);
691} 676}
692 677
693static void dma_irq(struct omap_udc *udc, u16 irq_src) 678static void dma_irq(struct omap_udc *udc, u16 irq_src)
694{ 679{
695 u16 dman_stat = UDC_DMAN_STAT_REG; 680 u16 dman_stat = omap_readw(UDC_DMAN_STAT);
696 struct omap_ep *ep; 681 struct omap_ep *ep;
697 struct omap_req *req; 682 struct omap_req *req;
698 683
@@ -706,7 +691,7 @@ static void dma_irq(struct omap_udc *udc, u16 irq_src)
706 struct omap_req, queue); 691 struct omap_req, queue);
707 finish_in_dma(ep, req, 0); 692 finish_in_dma(ep, req, 0);
708 } 693 }
709 UDC_IRQ_SRC_REG = UDC_TXN_DONE; 694 omap_writew(UDC_TXN_DONE, UDC_IRQ_SRC);
710 695
711 if (!list_empty (&ep->queue)) { 696 if (!list_empty (&ep->queue)) {
712 req = container_of(ep->queue.next, 697 req = container_of(ep->queue.next,
@@ -725,7 +710,7 @@ static void dma_irq(struct omap_udc *udc, u16 irq_src)
725 struct omap_req, queue); 710 struct omap_req, queue);
726 finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB); 711 finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
727 } 712 }
728 UDC_IRQ_SRC_REG = UDC_RXN_EOT; 713 omap_writew(UDC_RXN_EOT, UDC_IRQ_SRC);
729 714
730 if (!list_empty (&ep->queue)) { 715 if (!list_empty (&ep->queue)) {
731 req = container_of(ep->queue.next, 716 req = container_of(ep->queue.next,
@@ -739,7 +724,7 @@ static void dma_irq(struct omap_udc *udc, u16 irq_src)
739 ep->irqs++; 724 ep->irqs++;
740 /* omap15xx does this unasked... */ 725 /* omap15xx does this unasked... */
741 VDBG("%s, RX_CNT irq?\n", ep->ep.name); 726 VDBG("%s, RX_CNT irq?\n", ep->ep.name);
742 UDC_IRQ_SRC_REG = UDC_RXN_CNT; 727 omap_writew(UDC_RXN_CNT, UDC_IRQ_SRC);
743 } 728 }
744} 729}
745 730
@@ -762,9 +747,9 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
762 747
763 is_in = ep->bEndpointAddress & USB_DIR_IN; 748 is_in = ep->bEndpointAddress & USB_DIR_IN;
764 if (is_in) 749 if (is_in)
765 reg = UDC_TXDMA_CFG_REG; 750 reg = omap_readw(UDC_TXDMA_CFG);
766 else 751 else
767 reg = UDC_RXDMA_CFG_REG; 752 reg = omap_readw(UDC_RXDMA_CFG);
768 reg |= UDC_DMA_REQ; /* "pulse" activated */ 753 reg |= UDC_DMA_REQ; /* "pulse" activated */
769 754
770 ep->dma_channel = 0; 755 ep->dma_channel = 0;
@@ -792,7 +777,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
792 status = omap_request_dma(dma_channel, 777 status = omap_request_dma(dma_channel,
793 ep->ep.name, dma_error, ep, &ep->lch); 778 ep->ep.name, dma_error, ep, &ep->lch);
794 if (status == 0) { 779 if (status == 0) {
795 UDC_TXDMA_CFG_REG = reg; 780 omap_writew(reg, UDC_TXDMA_CFG);
796 /* EMIFF or SDRC */ 781 /* EMIFF or SDRC */
797 omap_set_dma_src_burst_mode(ep->lch, 782 omap_set_dma_src_burst_mode(ep->lch,
798 OMAP_DMA_DATA_BURST_4); 783 OMAP_DMA_DATA_BURST_4);
@@ -801,7 +786,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
801 omap_set_dma_dest_params(ep->lch, 786 omap_set_dma_dest_params(ep->lch,
802 OMAP_DMA_PORT_TIPB, 787 OMAP_DMA_PORT_TIPB,
803 OMAP_DMA_AMODE_CONSTANT, 788 OMAP_DMA_AMODE_CONSTANT,
804 (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG), 789 (unsigned long) io_v2p(UDC_DATA_DMA),
805 0, 0); 790 0, 0);
806 } 791 }
807 } else { 792 } else {
@@ -813,12 +798,12 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
813 status = omap_request_dma(dma_channel, 798 status = omap_request_dma(dma_channel,
814 ep->ep.name, dma_error, ep, &ep->lch); 799 ep->ep.name, dma_error, ep, &ep->lch);
815 if (status == 0) { 800 if (status == 0) {
816 UDC_RXDMA_CFG_REG = reg; 801 omap_writew(reg, UDC_RXDMA_CFG);
817 /* TIPB */ 802 /* TIPB */
818 omap_set_dma_src_params(ep->lch, 803 omap_set_dma_src_params(ep->lch,
819 OMAP_DMA_PORT_TIPB, 804 OMAP_DMA_PORT_TIPB,
820 OMAP_DMA_AMODE_CONSTANT, 805 OMAP_DMA_AMODE_CONSTANT,
821 (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG), 806 (unsigned long) io_v2p(UDC_DATA_DMA),
822 0, 0); 807 0, 0);
823 /* EMIFF or SDRC */ 808 /* EMIFF or SDRC */
824 omap_set_dma_dest_burst_mode(ep->lch, 809 omap_set_dma_dest_burst_mode(ep->lch,
@@ -834,7 +819,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
834 819
835 /* channel type P: hw synch (fifo) */ 820 /* channel type P: hw synch (fifo) */
836 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) 821 if (cpu_class_is_omap1() && !cpu_is_omap15xx())
837 OMAP1_DMA_LCH_CTRL_REG(ep->lch) = 2; 822 omap_set_dma_channel_mode(ep->lch, OMAP_DMA_LCH_P);
838 } 823 }
839 824
840just_restart: 825just_restart:
@@ -860,7 +845,7 @@ just_restart:
860 (is_in ? write_fifo : read_fifo)(ep, req); 845 (is_in ? write_fifo : read_fifo)(ep, req);
861 deselect_ep(); 846 deselect_ep();
862 if (!is_in) { 847 if (!is_in) {
863 UDC_CTRL_REG = UDC_SET_FIFO_EN; 848 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
864 ep->ackwait = 1 + ep->double_buf; 849 ep->ackwait = 1 + ep->double_buf;
865 } 850 }
866 /* IN: 6 wait states before it'll tx */ 851 /* IN: 6 wait states before it'll tx */
@@ -881,7 +866,7 @@ static void dma_channel_release(struct omap_ep *ep)
881 else 866 else
882 req = NULL; 867 req = NULL;
883 868
884 active = ((1 << 7) & OMAP_DMA_CCR_REG(ep->lch)) != 0; 869 active = omap_get_dma_active_status(ep->lch);
885 870
886 DBG("%s release %s %cxdma%d %p\n", ep->ep.name, 871 DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
887 active ? "active" : "idle", 872 active ? "active" : "idle",
@@ -894,23 +879,25 @@ static void dma_channel_release(struct omap_ep *ep)
894 879
895 /* wait till current packet DMA finishes, and fifo empties */ 880 /* wait till current packet DMA finishes, and fifo empties */
896 if (ep->bEndpointAddress & USB_DIR_IN) { 881 if (ep->bEndpointAddress & USB_DIR_IN) {
897 UDC_TXDMA_CFG_REG = (UDC_TXDMA_CFG_REG & ~mask) | UDC_DMA_REQ; 882 omap_writew((omap_readw(UDC_TXDMA_CFG) & ~mask) | UDC_DMA_REQ,
883 UDC_TXDMA_CFG);
898 884
899 if (req) { 885 if (req) {
900 finish_in_dma(ep, req, -ECONNRESET); 886 finish_in_dma(ep, req, -ECONNRESET);
901 887
902 /* clear FIFO; hosts probably won't empty it */ 888 /* clear FIFO; hosts probably won't empty it */
903 use_ep(ep, UDC_EP_SEL); 889 use_ep(ep, UDC_EP_SEL);
904 UDC_CTRL_REG = UDC_CLR_EP; 890 omap_writew(UDC_CLR_EP, UDC_CTRL);
905 deselect_ep(); 891 deselect_ep();
906 } 892 }
907 while (UDC_TXDMA_CFG_REG & mask) 893 while (omap_readw(UDC_TXDMA_CFG) & mask)
908 udelay(10); 894 udelay(10);
909 } else { 895 } else {
910 UDC_RXDMA_CFG_REG = (UDC_RXDMA_CFG_REG & ~mask) | UDC_DMA_REQ; 896 omap_writew((omap_readw(UDC_RXDMA_CFG) & ~mask) | UDC_DMA_REQ,
897 UDC_RXDMA_CFG);
911 898
912 /* dma empties the fifo */ 899 /* dma empties the fifo */
913 while (UDC_RXDMA_CFG_REG & mask) 900 while (omap_readw(UDC_RXDMA_CFG) & mask)
914 udelay(10); 901 udelay(10);
915 if (req) 902 if (req)
916 finish_out_dma(ep, req, -ECONNRESET, 0); 903 finish_out_dma(ep, req, -ECONNRESET, 0);
@@ -997,9 +984,13 @@ omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
997 req->req.actual = 0; 984 req->req.actual = 0;
998 985
999 /* maybe kickstart non-iso i/o queues */ 986 /* maybe kickstart non-iso i/o queues */
1000 if (is_iso) 987 if (is_iso) {
1001 UDC_IRQ_EN_REG |= UDC_SOF_IE; 988 u16 w;
1002 else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) { 989
990 w = omap_readw(UDC_IRQ_EN);
991 w |= UDC_SOF_IE;
992 omap_writew(w, UDC_IRQ_EN);
993 } else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
1003 int is_in; 994 int is_in;
1004 995
1005 if (ep->bEndpointAddress == 0) { 996 if (ep->bEndpointAddress == 0) {
@@ -1017,23 +1008,23 @@ omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1017 * requests to non-control endpoints 1008 * requests to non-control endpoints
1018 */ 1009 */
1019 if (udc->ep0_set_config) { 1010 if (udc->ep0_set_config) {
1020 u16 irq_en = UDC_IRQ_EN_REG; 1011 u16 irq_en = omap_readw(UDC_IRQ_EN);
1021 1012
1022 irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE; 1013 irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
1023 if (!udc->ep0_reset_config) 1014 if (!udc->ep0_reset_config)
1024 irq_en |= UDC_EPN_RX_IE 1015 irq_en |= UDC_EPN_RX_IE
1025 | UDC_EPN_TX_IE; 1016 | UDC_EPN_TX_IE;
1026 UDC_IRQ_EN_REG = irq_en; 1017 omap_writew(irq_en, UDC_IRQ_EN);
1027 } 1018 }
1028 1019
1029 /* STATUS for zero length DATA stages is 1020 /* STATUS for zero length DATA stages is
1030 * always an IN ... even for IN transfers, 1021 * always an IN ... even for IN transfers,
1031 * a weird case which seem to stall OMAP. 1022 * a weird case which seem to stall OMAP.
1032 */ 1023 */
1033 UDC_EP_NUM_REG = (UDC_EP_SEL|UDC_EP_DIR); 1024 omap_writew(UDC_EP_SEL | UDC_EP_DIR, UDC_EP_NUM);
1034 UDC_CTRL_REG = UDC_CLR_EP; 1025 omap_writew(UDC_CLR_EP, UDC_CTRL);
1035 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1026 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1036 UDC_EP_NUM_REG = UDC_EP_DIR; 1027 omap_writew(UDC_EP_DIR, UDC_EP_NUM);
1037 1028
1038 /* cleanup */ 1029 /* cleanup */
1039 udc->ep0_pending = 0; 1030 udc->ep0_pending = 0;
@@ -1042,11 +1033,11 @@ omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1042 1033
1043 /* non-empty DATA stage */ 1034 /* non-empty DATA stage */
1044 } else if (is_in) { 1035 } else if (is_in) {
1045 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR; 1036 omap_writew(UDC_EP_SEL | UDC_EP_DIR, UDC_EP_NUM);
1046 } else { 1037 } else {
1047 if (udc->ep0_setup) 1038 if (udc->ep0_setup)
1048 goto irq_wait; 1039 goto irq_wait;
1049 UDC_EP_NUM_REG = UDC_EP_SEL; 1040 omap_writew(UDC_EP_SEL, UDC_EP_NUM);
1050 } 1041 }
1051 } else { 1042 } else {
1052 is_in = ep->bEndpointAddress & USB_DIR_IN; 1043 is_in = ep->bEndpointAddress & USB_DIR_IN;
@@ -1062,7 +1053,7 @@ omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1062 req = NULL; 1053 req = NULL;
1063 deselect_ep(); 1054 deselect_ep();
1064 if (!is_in) { 1055 if (!is_in) {
1065 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1056 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1066 ep->ackwait = 1 + ep->double_buf; 1057 ep->ackwait = 1 + ep->double_buf;
1067 } 1058 }
1068 /* IN: 6 wait states before it'll tx */ 1059 /* IN: 6 wait states before it'll tx */
@@ -1130,9 +1121,9 @@ static int omap_ep_set_halt(struct usb_ep *_ep, int value)
1130 else if (value) { 1121 else if (value) {
1131 if (ep->udc->ep0_set_config) { 1122 if (ep->udc->ep0_set_config) {
1132 WARN("error changing config?\n"); 1123 WARN("error changing config?\n");
1133 UDC_SYSCON2_REG = UDC_CLR_CFG; 1124 omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
1134 } 1125 }
1135 UDC_SYSCON2_REG = UDC_STALL_CMD; 1126 omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
1136 ep->udc->ep0_pending = 0; 1127 ep->udc->ep0_pending = 0;
1137 status = 0; 1128 status = 0;
1138 } else /* NOP */ 1129 } else /* NOP */
@@ -1159,8 +1150,8 @@ static int omap_ep_set_halt(struct usb_ep *_ep, int value)
1159 channel = 0; 1150 channel = 0;
1160 1151
1161 use_ep(ep, UDC_EP_SEL); 1152 use_ep(ep, UDC_EP_SEL);
1162 if (UDC_STAT_FLG_REG & UDC_NON_ISO_FIFO_EMPTY) { 1153 if (omap_readw(UDC_STAT_FLG) & UDC_NON_ISO_FIFO_EMPTY) {
1163 UDC_CTRL_REG = UDC_SET_HALT; 1154 omap_writew(UDC_SET_HALT, UDC_CTRL);
1164 status = 0; 1155 status = 0;
1165 } else 1156 } else
1166 status = -EAGAIN; 1157 status = -EAGAIN;
@@ -1170,10 +1161,10 @@ static int omap_ep_set_halt(struct usb_ep *_ep, int value)
1170 dma_channel_claim(ep, channel); 1161 dma_channel_claim(ep, channel);
1171 } else { 1162 } else {
1172 use_ep(ep, 0); 1163 use_ep(ep, 0);
1173 UDC_CTRL_REG = ep->udc->clr_halt; 1164 omap_writew(ep->udc->clr_halt, UDC_CTRL);
1174 ep->ackwait = 0; 1165 ep->ackwait = 0;
1175 if (!(ep->bEndpointAddress & USB_DIR_IN)) { 1166 if (!(ep->bEndpointAddress & USB_DIR_IN)) {
1176 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1167 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1177 ep->ackwait = 1 + ep->double_buf; 1168 ep->ackwait = 1 + ep->double_buf;
1178 } 1169 }
1179 } 1170 }
@@ -1205,7 +1196,7 @@ static struct usb_ep_ops omap_ep_ops = {
1205 1196
1206static int omap_get_frame(struct usb_gadget *gadget) 1197static int omap_get_frame(struct usb_gadget *gadget)
1207{ 1198{
1208 u16 sof = UDC_SOF_REG; 1199 u16 sof = omap_readw(UDC_SOF);
1209 return (sof & UDC_TS_OK) ? (sof & UDC_TS) : -EL2NSYNC; 1200 return (sof & UDC_TS_OK) ? (sof & UDC_TS) : -EL2NSYNC;
1210} 1201}
1211 1202
@@ -1224,7 +1215,7 @@ static int omap_wakeup(struct usb_gadget *gadget)
1224 */ 1215 */
1225 if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) { 1216 if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
1226 DBG("remote wakeup...\n"); 1217 DBG("remote wakeup...\n");
1227 UDC_SYSCON2_REG = UDC_RMT_WKP; 1218 omap_writew(UDC_RMT_WKP, UDC_SYSCON2);
1228 retval = 0; 1219 retval = 0;
1229 } 1220 }
1230 1221
@@ -1247,12 +1238,12 @@ omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
1247 1238
1248 udc = container_of(gadget, struct omap_udc, gadget); 1239 udc = container_of(gadget, struct omap_udc, gadget);
1249 spin_lock_irqsave(&udc->lock, flags); 1240 spin_lock_irqsave(&udc->lock, flags);
1250 syscon1 = UDC_SYSCON1_REG; 1241 syscon1 = omap_readw(UDC_SYSCON1);
1251 if (is_selfpowered) 1242 if (is_selfpowered)
1252 syscon1 |= UDC_SELF_PWR; 1243 syscon1 |= UDC_SELF_PWR;
1253 else 1244 else
1254 syscon1 &= ~UDC_SELF_PWR; 1245 syscon1 &= ~UDC_SELF_PWR;
1255 UDC_SYSCON1_REG = syscon1; 1246 omap_writew(syscon1, UDC_SYSCON1);
1256 spin_unlock_irqrestore(&udc->lock, flags); 1247 spin_unlock_irqrestore(&udc->lock, flags);
1257 1248
1258 return 0; 1249 return 0;
@@ -1265,18 +1256,36 @@ static int can_pullup(struct omap_udc *udc)
1265 1256
1266static void pullup_enable(struct omap_udc *udc) 1257static void pullup_enable(struct omap_udc *udc)
1267{ 1258{
1268 UDC_SYSCON1_REG |= UDC_PULLUP_EN; 1259 u16 w;
1269 if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) 1260
1270 OTG_CTRL_REG |= OTG_BSESSVLD; 1261 w = omap_readw(UDC_SYSCON1);
1271 UDC_IRQ_EN_REG = UDC_DS_CHG_IE; 1262 w |= UDC_PULLUP_EN;
1263 omap_writew(w, UDC_SYSCON1);
1264 if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
1265 u32 l;
1266
1267 l = omap_readl(OTG_CTRL);
1268 l |= OTG_BSESSVLD;
1269 omap_writel(l, OTG_CTRL);
1270 }
1271 omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
1272} 1272}
1273 1273
1274static void pullup_disable(struct omap_udc *udc) 1274static void pullup_disable(struct omap_udc *udc)
1275{ 1275{
1276 if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) 1276 u16 w;
1277 OTG_CTRL_REG &= ~OTG_BSESSVLD; 1277
1278 UDC_IRQ_EN_REG = UDC_DS_CHG_IE; 1278 if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
1279 UDC_SYSCON1_REG &= ~UDC_PULLUP_EN; 1279 u32 l;
1280
1281 l = omap_readl(OTG_CTRL);
1282 l &= ~OTG_BSESSVLD;
1283 omap_writel(l, OTG_CTRL);
1284 }
1285 omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
1286 w = omap_readw(UDC_SYSCON1);
1287 w &= ~UDC_PULLUP_EN;
1288 omap_writew(w, UDC_SYSCON1);
1280} 1289}
1281 1290
1282static struct omap_udc *udc; 1291static struct omap_udc *udc;
@@ -1304,6 +1313,7 @@ static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
1304{ 1313{
1305 struct omap_udc *udc; 1314 struct omap_udc *udc;
1306 unsigned long flags; 1315 unsigned long flags;
1316 u32 l;
1307 1317
1308 udc = container_of(gadget, struct omap_udc, gadget); 1318 udc = container_of(gadget, struct omap_udc, gadget);
1309 spin_lock_irqsave(&udc->lock, flags); 1319 spin_lock_irqsave(&udc->lock, flags);
@@ -1311,10 +1321,12 @@ static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
1311 udc->vbus_active = (is_active != 0); 1321 udc->vbus_active = (is_active != 0);
1312 if (cpu_is_omap15xx()) { 1322 if (cpu_is_omap15xx()) {
1313 /* "software" detect, ignored if !VBUS_MODE_1510 */ 1323 /* "software" detect, ignored if !VBUS_MODE_1510 */
1324 l = omap_readl(FUNC_MUX_CTRL_0);
1314 if (is_active) 1325 if (is_active)
1315 FUNC_MUX_CTRL_0_REG |= VBUS_CTRL_1510; 1326 l |= VBUS_CTRL_1510;
1316 else 1327 else
1317 FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510; 1328 l &= ~VBUS_CTRL_1510;
1329 omap_writel(l, FUNC_MUX_CTRL_0);
1318 } 1330 }
1319 if (udc->dc_clk != NULL && is_active) { 1331 if (udc->dc_clk != NULL && is_active) {
1320 if (!udc->clk_requested) { 1332 if (!udc->clk_requested) {
@@ -1384,9 +1396,9 @@ static void nuke(struct omap_ep *ep, int status)
1384 dma_channel_release(ep); 1396 dma_channel_release(ep);
1385 1397
1386 use_ep(ep, 0); 1398 use_ep(ep, 0);
1387 UDC_CTRL_REG = UDC_CLR_EP; 1399 omap_writew(UDC_CLR_EP, UDC_CTRL);
1388 if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC) 1400 if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
1389 UDC_CTRL_REG = UDC_SET_HALT; 1401 omap_writew(UDC_SET_HALT, UDC_CTRL);
1390 1402
1391 while (!list_empty(&ep->queue)) { 1403 while (!list_empty(&ep->queue)) {
1392 req = list_entry(ep->queue.next, struct omap_req, queue); 1404 req = list_entry(ep->queue.next, struct omap_req, queue);
@@ -1414,8 +1426,8 @@ static void update_otg(struct omap_udc *udc)
1414 if (!gadget_is_otg(&udc->gadget)) 1426 if (!gadget_is_otg(&udc->gadget))
1415 return; 1427 return;
1416 1428
1417 if (OTG_CTRL_REG & OTG_ID) 1429 if (omap_readl(OTG_CTRL) & OTG_ID)
1418 devstat = UDC_DEVSTAT_REG; 1430 devstat = omap_readw(UDC_DEVSTAT);
1419 else 1431 else
1420 devstat = 0; 1432 devstat = 0;
1421 1433
@@ -1426,9 +1438,14 @@ static void update_otg(struct omap_udc *udc)
1426 /* Enable HNP early, avoiding races on suspend irq path. 1438 /* Enable HNP early, avoiding races on suspend irq path.
1427 * ASSUMES OTG state machine B_BUS_REQ input is true. 1439 * ASSUMES OTG state machine B_BUS_REQ input is true.
1428 */ 1440 */
1429 if (udc->gadget.b_hnp_enable) 1441 if (udc->gadget.b_hnp_enable) {
1430 OTG_CTRL_REG = (OTG_CTRL_REG | OTG_B_HNPEN | OTG_B_BUSREQ) 1442 u32 l;
1431 & ~OTG_PULLUP; 1443
1444 l = omap_readl(OTG_CTRL);
1445 l |= OTG_B_HNPEN | OTG_B_BUSREQ;
1446 l &= ~OTG_PULLUP;
1447 omap_writel(l, OTG_CTRL);
1448 }
1432} 1449}
1433 1450
1434static void ep0_irq(struct omap_udc *udc, u16 irq_src) 1451static void ep0_irq(struct omap_udc *udc, u16 irq_src)
@@ -1446,7 +1463,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1446 1463
1447 nuke(ep0, 0); 1464 nuke(ep0, 0);
1448 if (ack) { 1465 if (ack) {
1449 UDC_IRQ_SRC_REG = ack; 1466 omap_writew(ack, UDC_IRQ_SRC);
1450 irq_src = UDC_SETUP; 1467 irq_src = UDC_SETUP;
1451 } 1468 }
1452 } 1469 }
@@ -1466,9 +1483,9 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1466 if (irq_src & UDC_EP0_TX) { 1483 if (irq_src & UDC_EP0_TX) {
1467 int stat; 1484 int stat;
1468 1485
1469 UDC_IRQ_SRC_REG = UDC_EP0_TX; 1486 omap_writew(UDC_EP0_TX, UDC_IRQ_SRC);
1470 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR; 1487 omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
1471 stat = UDC_STAT_FLG_REG; 1488 stat = omap_readw(UDC_STAT_FLG);
1472 if (stat & UDC_ACK) { 1489 if (stat & UDC_ACK) {
1473 if (udc->ep0_in) { 1490 if (udc->ep0_in) {
1474 /* write next IN packet from response, 1491 /* write next IN packet from response,
@@ -1476,26 +1493,26 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1476 */ 1493 */
1477 if (req) 1494 if (req)
1478 stat = write_fifo(ep0, req); 1495 stat = write_fifo(ep0, req);
1479 UDC_EP_NUM_REG = UDC_EP_DIR; 1496 omap_writew(UDC_EP_DIR, UDC_EP_NUM);
1480 if (!req && udc->ep0_pending) { 1497 if (!req && udc->ep0_pending) {
1481 UDC_EP_NUM_REG = UDC_EP_SEL; 1498 omap_writew(UDC_EP_SEL, UDC_EP_NUM);
1482 UDC_CTRL_REG = UDC_CLR_EP; 1499 omap_writew(UDC_CLR_EP, UDC_CTRL);
1483 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1500 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1484 UDC_EP_NUM_REG = 0; 1501 omap_writew(0, UDC_EP_NUM);
1485 udc->ep0_pending = 0; 1502 udc->ep0_pending = 0;
1486 } /* else: 6 wait states before it'll tx */ 1503 } /* else: 6 wait states before it'll tx */
1487 } else { 1504 } else {
1488 /* ack status stage of OUT transfer */ 1505 /* ack status stage of OUT transfer */
1489 UDC_EP_NUM_REG = UDC_EP_DIR; 1506 omap_writew(UDC_EP_DIR, UDC_EP_NUM);
1490 if (req) 1507 if (req)
1491 done(ep0, req, 0); 1508 done(ep0, req, 0);
1492 } 1509 }
1493 req = NULL; 1510 req = NULL;
1494 } else if (stat & UDC_STALL) { 1511 } else if (stat & UDC_STALL) {
1495 UDC_CTRL_REG = UDC_CLR_HALT; 1512 omap_writew(UDC_CLR_HALT, UDC_CTRL);
1496 UDC_EP_NUM_REG = UDC_EP_DIR; 1513 omap_writew(UDC_EP_DIR, UDC_EP_NUM);
1497 } else { 1514 } else {
1498 UDC_EP_NUM_REG = UDC_EP_DIR; 1515 omap_writew(UDC_EP_DIR, UDC_EP_NUM);
1499 } 1516 }
1500 } 1517 }
1501 1518
@@ -1503,9 +1520,9 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1503 if (irq_src & UDC_EP0_RX) { 1520 if (irq_src & UDC_EP0_RX) {
1504 int stat; 1521 int stat;
1505 1522
1506 UDC_IRQ_SRC_REG = UDC_EP0_RX; 1523 omap_writew(UDC_EP0_RX, UDC_IRQ_SRC);
1507 UDC_EP_NUM_REG = UDC_EP_SEL; 1524 omap_writew(UDC_EP_SEL, UDC_EP_NUM);
1508 stat = UDC_STAT_FLG_REG; 1525 stat = omap_readw(UDC_STAT_FLG);
1509 if (stat & UDC_ACK) { 1526 if (stat & UDC_ACK) {
1510 if (!udc->ep0_in) { 1527 if (!udc->ep0_in) {
1511 stat = 0; 1528 stat = 0;
@@ -1513,34 +1530,35 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1513	 * reactivating the fifo; stall on errors.	1530	 * reactivating the fifo; stall on errors.
1514 */ 1531 */
1515 if (!req || (stat = read_fifo(ep0, req)) < 0) { 1532 if (!req || (stat = read_fifo(ep0, req)) < 0) {
1516 UDC_SYSCON2_REG = UDC_STALL_CMD; 1533 omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
1517 udc->ep0_pending = 0; 1534 udc->ep0_pending = 0;
1518 stat = 0; 1535 stat = 0;
1519 } else if (stat == 0) 1536 } else if (stat == 0)
1520 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1537 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1521 UDC_EP_NUM_REG = 0; 1538 omap_writew(0, UDC_EP_NUM);
1522 1539
1523 /* activate status stage */ 1540 /* activate status stage */
1524 if (stat == 1) { 1541 if (stat == 1) {
1525 done(ep0, req, 0); 1542 done(ep0, req, 0);
1526 /* that may have STALLed ep0... */ 1543 /* that may have STALLed ep0... */
1527 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR; 1544 omap_writew(UDC_EP_SEL | UDC_EP_DIR,
1528 UDC_CTRL_REG = UDC_CLR_EP; 1545 UDC_EP_NUM);
1529 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1546 omap_writew(UDC_CLR_EP, UDC_CTRL);
1530 UDC_EP_NUM_REG = UDC_EP_DIR; 1547 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1548 omap_writew(UDC_EP_DIR, UDC_EP_NUM);
1531 udc->ep0_pending = 0; 1549 udc->ep0_pending = 0;
1532 } 1550 }
1533 } else { 1551 } else {
1534 /* ack status stage of IN transfer */ 1552 /* ack status stage of IN transfer */
1535 UDC_EP_NUM_REG = 0; 1553 omap_writew(0, UDC_EP_NUM);
1536 if (req) 1554 if (req)
1537 done(ep0, req, 0); 1555 done(ep0, req, 0);
1538 } 1556 }
1539 } else if (stat & UDC_STALL) { 1557 } else if (stat & UDC_STALL) {
1540 UDC_CTRL_REG = UDC_CLR_HALT; 1558 omap_writew(UDC_CLR_HALT, UDC_CTRL);
1541 UDC_EP_NUM_REG = 0; 1559 omap_writew(0, UDC_EP_NUM);
1542 } else { 1560 } else {
1543 UDC_EP_NUM_REG = 0; 1561 omap_writew(0, UDC_EP_NUM);
1544 } 1562 }
1545 } 1563 }
1546 1564
@@ -1555,14 +1573,14 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1555 1573
1556 /* read the (latest) SETUP message */ 1574 /* read the (latest) SETUP message */
1557 do { 1575 do {
1558 UDC_EP_NUM_REG = UDC_SETUP_SEL; 1576 omap_writew(UDC_SETUP_SEL, UDC_EP_NUM);
1559 /* two bytes at a time */ 1577 /* two bytes at a time */
1560 u.word[0] = UDC_DATA_REG; 1578 u.word[0] = omap_readw(UDC_DATA);
1561 u.word[1] = UDC_DATA_REG; 1579 u.word[1] = omap_readw(UDC_DATA);
1562 u.word[2] = UDC_DATA_REG; 1580 u.word[2] = omap_readw(UDC_DATA);
1563 u.word[3] = UDC_DATA_REG; 1581 u.word[3] = omap_readw(UDC_DATA);
1564 UDC_EP_NUM_REG = 0; 1582 omap_writew(0, UDC_EP_NUM);
1565 } while (UDC_IRQ_SRC_REG & UDC_SETUP); 1583 } while (omap_readw(UDC_IRQ_SRC) & UDC_SETUP);
1566 1584
1567#define w_value le16_to_cpu(u.r.wValue) 1585#define w_value le16_to_cpu(u.r.wValue)
1568#define w_index le16_to_cpu(u.r.wIndex) 1586#define w_index le16_to_cpu(u.r.wIndex)
@@ -1593,9 +1611,9 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1593 * later if it fails the request. 1611 * later if it fails the request.
1594 */ 1612 */
1595 if (udc->ep0_reset_config) 1613 if (udc->ep0_reset_config)
1596 UDC_SYSCON2_REG = UDC_CLR_CFG; 1614 omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
1597 else 1615 else
1598 UDC_SYSCON2_REG = UDC_DEV_CFG; 1616 omap_writew(UDC_DEV_CFG, UDC_SYSCON2);
1599 update_otg(udc); 1617 update_otg(udc);
1600 goto delegate; 1618 goto delegate;
1601 case USB_REQ_CLEAR_FEATURE: 1619 case USB_REQ_CLEAR_FEATURE:
@@ -1613,10 +1631,10 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1613 || !ep->desc) 1631 || !ep->desc)
1614 goto do_stall; 1632 goto do_stall;
1615 use_ep(ep, 0); 1633 use_ep(ep, 0);
1616 UDC_CTRL_REG = udc->clr_halt; 1634 omap_writew(udc->clr_halt, UDC_CTRL);
1617 ep->ackwait = 0; 1635 ep->ackwait = 0;
1618 if (!(ep->bEndpointAddress & USB_DIR_IN)) { 1636 if (!(ep->bEndpointAddress & USB_DIR_IN)) {
1619 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1637 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1620 ep->ackwait = 1 + ep->double_buf; 1638 ep->ackwait = 1 + ep->double_buf;
1621 } 1639 }
1622 /* NOTE: assumes the host behaves sanely, 1640 /* NOTE: assumes the host behaves sanely,
@@ -1649,15 +1667,15 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1649 } 1667 }
1650 use_ep(ep, 0); 1668 use_ep(ep, 0);
1651 /* can't halt if fifo isn't empty... */ 1669 /* can't halt if fifo isn't empty... */
1652 UDC_CTRL_REG = UDC_CLR_EP; 1670 omap_writew(UDC_CLR_EP, UDC_CTRL);
1653 UDC_CTRL_REG = UDC_SET_HALT; 1671 omap_writew(UDC_SET_HALT, UDC_CTRL);
1654 VDBG("%s halted by host\n", ep->name); 1672 VDBG("%s halted by host\n", ep->name);
1655ep0out_status_stage: 1673ep0out_status_stage:
1656 status = 0; 1674 status = 0;
1657 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR; 1675 omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
1658 UDC_CTRL_REG = UDC_CLR_EP; 1676 omap_writew(UDC_CLR_EP, UDC_CTRL);
1659 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1677 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1660 UDC_EP_NUM_REG = UDC_EP_DIR; 1678 omap_writew(UDC_EP_DIR, UDC_EP_NUM);
1661 udc->ep0_pending = 0; 1679 udc->ep0_pending = 0;
1662 break; 1680 break;
1663 case USB_REQ_GET_STATUS: 1681 case USB_REQ_GET_STATUS:
@@ -1694,10 +1712,10 @@ intf_status:
1694 1712
1695zero_status: 1713zero_status:
1696 /* return two zero bytes */ 1714 /* return two zero bytes */
1697 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR; 1715 omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
1698 UDC_DATA_REG = 0; 1716 omap_writew(0, UDC_DATA);
1699 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1717 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1700 UDC_EP_NUM_REG = UDC_EP_DIR; 1718 omap_writew(UDC_EP_DIR, UDC_EP_NUM);
1701 status = 0; 1719 status = 0;
1702 VDBG("GET_STATUS, interface %d\n", w_index); 1720 VDBG("GET_STATUS, interface %d\n", w_index);
1703 /* next, status stage */ 1721 /* next, status stage */
@@ -1706,8 +1724,8 @@ zero_status:
1706delegate: 1724delegate:
1707 /* activate the ep0out fifo right away */ 1725 /* activate the ep0out fifo right away */
1708 if (!udc->ep0_in && w_length) { 1726 if (!udc->ep0_in && w_length) {
1709 UDC_EP_NUM_REG = 0; 1727 omap_writew(0, UDC_EP_NUM);
1710 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1728 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1711 } 1729 }
1712 1730
1713 /* gadget drivers see class/vendor specific requests, 1731 /* gadget drivers see class/vendor specific requests,
@@ -1748,9 +1766,9 @@ do_stall:
1748 if (udc->ep0_reset_config) 1766 if (udc->ep0_reset_config)
1749 WARN("error resetting config?\n"); 1767 WARN("error resetting config?\n");
1750 else 1768 else
1751 UDC_SYSCON2_REG = UDC_CLR_CFG; 1769 omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
1752 } 1770 }
1753 UDC_SYSCON2_REG = UDC_STALL_CMD; 1771 omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
1754 udc->ep0_pending = 0; 1772 udc->ep0_pending = 0;
1755 } 1773 }
1756 } 1774 }
@@ -1764,7 +1782,7 @@ static void devstate_irq(struct omap_udc *udc, u16 irq_src)
1764{ 1782{
1765 u16 devstat, change; 1783 u16 devstat, change;
1766 1784
1767 devstat = UDC_DEVSTAT_REG; 1785 devstat = omap_readw(UDC_DEVSTAT);
1768 change = devstat ^ udc->devstat; 1786 change = devstat ^ udc->devstat;
1769 udc->devstat = devstat; 1787 udc->devstat = devstat;
1770 1788
@@ -1804,7 +1822,8 @@ static void devstate_irq(struct omap_udc *udc, u16 irq_src)
1804 INFO("USB reset done, gadget %s\n", 1822 INFO("USB reset done, gadget %s\n",
1805 udc->driver->driver.name); 1823 udc->driver->driver.name);
1806 /* ep0 traffic is legal from now on */ 1824 /* ep0 traffic is legal from now on */
1807 UDC_IRQ_EN_REG = UDC_DS_CHG_IE | UDC_EP0_IE; 1825 omap_writew(UDC_DS_CHG_IE | UDC_EP0_IE,
1826 UDC_IRQ_EN);
1808 } 1827 }
1809 change &= ~UDC_USB_RESET; 1828 change &= ~UDC_USB_RESET;
1810 } 1829 }
@@ -1848,7 +1867,7 @@ static void devstate_irq(struct omap_udc *udc, u16 irq_src)
1848 VDBG("devstat %03x, ignore change %03x\n", 1867 VDBG("devstat %03x, ignore change %03x\n",
1849 devstat, change); 1868 devstat, change);
1850 1869
1851 UDC_IRQ_SRC_REG = UDC_DS_CHG; 1870 omap_writew(UDC_DS_CHG, UDC_IRQ_SRC);
1852} 1871}
1853 1872
1854static irqreturn_t omap_udc_irq(int irq, void *_udc) 1873static irqreturn_t omap_udc_irq(int irq, void *_udc)
@@ -1859,7 +1878,7 @@ static irqreturn_t omap_udc_irq(int irq, void *_udc)
1859 unsigned long flags; 1878 unsigned long flags;
1860 1879
1861 spin_lock_irqsave(&udc->lock, flags); 1880 spin_lock_irqsave(&udc->lock, flags);
1862 irq_src = UDC_IRQ_SRC_REG; 1881 irq_src = omap_readw(UDC_IRQ_SRC);
1863 1882
1864 /* Device state change (usb ch9 stuff) */ 1883 /* Device state change (usb ch9 stuff) */
1865 if (irq_src & UDC_DS_CHG) { 1884 if (irq_src & UDC_DS_CHG) {
@@ -1882,7 +1901,7 @@ static irqreturn_t omap_udc_irq(int irq, void *_udc)
1882 irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT); 1901 irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
1883 } 1902 }
1884 1903
1885 irq_src &= ~(UDC_SOF|UDC_EPN_TX|UDC_EPN_RX); 1904 irq_src &= ~(UDC_IRQ_SOF | UDC_EPN_TX|UDC_EPN_RX);
1886 if (irq_src) 1905 if (irq_src)
1887 DBG("udc_irq, unhandled %03x\n", irq_src); 1906 DBG("udc_irq, unhandled %03x\n", irq_src);
1888 spin_unlock_irqrestore(&udc->lock, flags); 1907 spin_unlock_irqrestore(&udc->lock, flags);
@@ -1903,7 +1922,7 @@ static void pio_out_timer(unsigned long _ep)
1903 spin_lock_irqsave(&ep->udc->lock, flags); 1922 spin_lock_irqsave(&ep->udc->lock, flags);
1904 if (!list_empty(&ep->queue) && ep->ackwait) { 1923 if (!list_empty(&ep->queue) && ep->ackwait) {
1905 use_ep(ep, UDC_EP_SEL); 1924 use_ep(ep, UDC_EP_SEL);
1906 stat_flg = UDC_STAT_FLG_REG; 1925 stat_flg = omap_readw(UDC_STAT_FLG);
1907 1926
1908 if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN) 1927 if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
1909 || (ep->double_buf && HALF_FULL(stat_flg)))) { 1928 || (ep->double_buf && HALF_FULL(stat_flg)))) {
@@ -1913,8 +1932,8 @@ static void pio_out_timer(unsigned long _ep)
1913 req = container_of(ep->queue.next, 1932 req = container_of(ep->queue.next,
1914 struct omap_req, queue); 1933 struct omap_req, queue);
1915 (void) read_fifo(ep, req); 1934 (void) read_fifo(ep, req);
1916 UDC_EP_NUM_REG = ep->bEndpointAddress; 1935 omap_writew(ep->bEndpointAddress, UDC_EP_NUM);
1917 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1936 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1918 ep->ackwait = 1 + ep->double_buf; 1937 ep->ackwait = 1 + ep->double_buf;
1919 } else 1938 } else
1920 deselect_ep(); 1939 deselect_ep();
@@ -1934,20 +1953,20 @@ static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
1934 unsigned long flags; 1953 unsigned long flags;
1935 1954
1936 spin_lock_irqsave(&udc->lock, flags); 1955 spin_lock_irqsave(&udc->lock, flags);
1937 epn_stat = UDC_EPN_STAT_REG; 1956 epn_stat = omap_readw(UDC_EPN_STAT);
1938 irq_src = UDC_IRQ_SRC_REG; 1957 irq_src = omap_readw(UDC_IRQ_SRC);
1939 1958
1940 /* handle OUT first, to avoid some wasteful NAKs */ 1959 /* handle OUT first, to avoid some wasteful NAKs */
1941 if (irq_src & UDC_EPN_RX) { 1960 if (irq_src & UDC_EPN_RX) {
1942 epnum = (epn_stat >> 8) & 0x0f; 1961 epnum = (epn_stat >> 8) & 0x0f;
1943 UDC_IRQ_SRC_REG = UDC_EPN_RX; 1962 omap_writew(UDC_EPN_RX, UDC_IRQ_SRC);
1944 status = IRQ_HANDLED; 1963 status = IRQ_HANDLED;
1945 ep = &udc->ep[epnum]; 1964 ep = &udc->ep[epnum];
1946 ep->irqs++; 1965 ep->irqs++;
1947 1966
1948 UDC_EP_NUM_REG = epnum | UDC_EP_SEL; 1967 omap_writew(epnum | UDC_EP_SEL, UDC_EP_NUM);
1949 ep->fnf = 0; 1968 ep->fnf = 0;
1950 if ((UDC_STAT_FLG_REG & UDC_ACK)) { 1969 if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
1951 ep->ackwait--; 1970 ep->ackwait--;
1952 if (!list_empty(&ep->queue)) { 1971 if (!list_empty(&ep->queue)) {
1953 int stat; 1972 int stat;
@@ -1959,15 +1978,15 @@ static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
1959 } 1978 }
1960 } 1979 }
1961 /* min 6 clock delay before clearing EP_SEL ... */ 1980 /* min 6 clock delay before clearing EP_SEL ... */
1962 epn_stat = UDC_EPN_STAT_REG; 1981 epn_stat = omap_readw(UDC_EPN_STAT);
1963 epn_stat = UDC_EPN_STAT_REG; 1982 epn_stat = omap_readw(UDC_EPN_STAT);
1964 UDC_EP_NUM_REG = epnum; 1983 omap_writew(epnum, UDC_EP_NUM);
1965 1984
1966 /* enabling fifo _after_ clearing ACK, contrary to docs, 1985 /* enabling fifo _after_ clearing ACK, contrary to docs,
1967 * reduces lossage; timer still needed though (sigh). 1986 * reduces lossage; timer still needed though (sigh).
1968 */ 1987 */
1969 if (ep->fnf) { 1988 if (ep->fnf) {
1970 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1989 omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
1971 ep->ackwait = 1 + ep->double_buf; 1990 ep->ackwait = 1 + ep->double_buf;
1972 } 1991 }
1973 mod_timer(&ep->timer, PIO_OUT_TIMEOUT); 1992 mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
@@ -1976,13 +1995,13 @@ static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
1976 /* then IN transfers */ 1995 /* then IN transfers */
1977 else if (irq_src & UDC_EPN_TX) { 1996 else if (irq_src & UDC_EPN_TX) {
1978 epnum = epn_stat & 0x0f; 1997 epnum = epn_stat & 0x0f;
1979 UDC_IRQ_SRC_REG = UDC_EPN_TX; 1998 omap_writew(UDC_EPN_TX, UDC_IRQ_SRC);
1980 status = IRQ_HANDLED; 1999 status = IRQ_HANDLED;
1981 ep = &udc->ep[16 + epnum]; 2000 ep = &udc->ep[16 + epnum];
1982 ep->irqs++; 2001 ep->irqs++;
1983 2002
1984 UDC_EP_NUM_REG = epnum | UDC_EP_DIR | UDC_EP_SEL; 2003 omap_writew(epnum | UDC_EP_DIR | UDC_EP_SEL, UDC_EP_NUM);
1985 if ((UDC_STAT_FLG_REG & UDC_ACK)) { 2004 if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
1986 ep->ackwait = 0; 2005 ep->ackwait = 0;
1987 if (!list_empty(&ep->queue)) { 2006 if (!list_empty(&ep->queue)) {
1988 req = container_of(ep->queue.next, 2007 req = container_of(ep->queue.next,
@@ -1991,9 +2010,9 @@ static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
1991 } 2010 }
1992 } 2011 }
1993 /* min 6 clock delay before clearing EP_SEL ... */ 2012 /* min 6 clock delay before clearing EP_SEL ... */
1994 epn_stat = UDC_EPN_STAT_REG; 2013 epn_stat = omap_readw(UDC_EPN_STAT);
1995 epn_stat = UDC_EPN_STAT_REG; 2014 epn_stat = omap_readw(UDC_EPN_STAT);
1996 UDC_EP_NUM_REG = epnum | UDC_EP_DIR; 2015 omap_writew(epnum | UDC_EP_DIR, UDC_EP_NUM);
1997 /* then 6 clocks before it'd tx */ 2016 /* then 6 clocks before it'd tx */
1998 } 2017 }
1999 2018
@@ -2021,7 +2040,7 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
2021 req = list_entry(ep->queue.next, struct omap_req, queue); 2040 req = list_entry(ep->queue.next, struct omap_req, queue);
2022 2041
2023 use_ep(ep, UDC_EP_SEL); 2042 use_ep(ep, UDC_EP_SEL);
2024 stat = UDC_STAT_FLG_REG; 2043 stat = omap_readw(UDC_STAT_FLG);
2025 2044
2026 /* NOTE: like the other controller drivers, this isn't 2045 /* NOTE: like the other controller drivers, this isn't
2027 * currently reporting lost or damaged frames. 2046 * currently reporting lost or damaged frames.
@@ -2053,9 +2072,14 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
2053 if (!list_empty(&ep->queue)) 2072 if (!list_empty(&ep->queue))
2054 pending = 1; 2073 pending = 1;
2055 } 2074 }
2056 if (!pending) 2075 if (!pending) {
2057 UDC_IRQ_EN_REG &= ~UDC_SOF_IE; 2076 u16 w;
2058 UDC_IRQ_SRC_REG = UDC_SOF; 2077
2078 w = omap_readw(UDC_IRQ_EN);
2079 w &= ~UDC_SOF_IE;
2080 omap_writew(w, UDC_IRQ_EN);
2081 }
2082 omap_writew(UDC_IRQ_SOF, UDC_IRQ_SRC);
2059 2083
2060 spin_unlock_irqrestore(&udc->lock, flags); 2084 spin_unlock_irqrestore(&udc->lock, flags);
2061 return IRQ_HANDLED; 2085 return IRQ_HANDLED;
@@ -2104,7 +2128,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
2104 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) 2128 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
2105 continue; 2129 continue;
2106 use_ep(ep, 0); 2130 use_ep(ep, 0);
2107 UDC_CTRL_REG = UDC_SET_HALT; 2131 omap_writew(UDC_SET_HALT, UDC_CTRL);
2108 } 2132 }
2109 udc->ep0_pending = 0; 2133 udc->ep0_pending = 0;
2110 udc->ep[0].irqs = 0; 2134 udc->ep[0].irqs = 0;
@@ -2128,7 +2152,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
2128 } 2152 }
2129 DBG("bound to driver %s\n", driver->driver.name); 2153 DBG("bound to driver %s\n", driver->driver.name);
2130 2154
2131 UDC_IRQ_SRC_REG = UDC_IRQ_SRC_MASK; 2155 omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
2132 2156
2133 /* connect to bus through transceiver */ 2157 /* connect to bus through transceiver */
2134 if (udc->transceiver) { 2158 if (udc->transceiver) {
@@ -2225,7 +2249,7 @@ static void proc_ep_show(struct seq_file *s, struct omap_ep *ep)
2225 else 2249 else
2226 buf[0] = 0; 2250 buf[0] = 0;
2227 2251
2228 stat_flg = UDC_STAT_FLG_REG; 2252 stat_flg = omap_readw(UDC_STAT_FLG);
2229 seq_printf(s, 2253 seq_printf(s,
2230 "\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n", 2254 "\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n",
2231 ep->name, buf, 2255 ep->name, buf,
@@ -2292,11 +2316,11 @@ static int proc_otg_show(struct seq_file *s)
2292 trans = CONTROL_DEVCONF_REG; 2316 trans = CONTROL_DEVCONF_REG;
2293 } else { 2317 } else {
2294 ctrl_name = "tranceiver_ctrl"; 2318 ctrl_name = "tranceiver_ctrl";
2295 trans = USB_TRANSCEIVER_CTRL_REG; 2319 trans = omap_readw(USB_TRANSCEIVER_CTRL);
2296 } 2320 }
2297 seq_printf(s, "\nOTG rev %d.%d, %s %05x\n", 2321 seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
2298 tmp >> 4, tmp & 0xf, ctrl_name, trans); 2322 tmp >> 4, tmp & 0xf, ctrl_name, trans);
2299 tmp = OTG_SYSCON_1_REG; 2323 tmp = omap_readw(OTG_SYSCON_1);
2300 seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s," 2324 seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
2301 FOURBITS "\n", tmp, 2325 FOURBITS "\n", tmp,
2302 trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R), 2326 trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R),
@@ -2308,7 +2332,7 @@ static int proc_otg_show(struct seq_file *s)
2308 (tmp & HST_IDLE_EN) ? " !host" : "", 2332 (tmp & HST_IDLE_EN) ? " !host" : "",
2309 (tmp & DEV_IDLE_EN) ? " !dev" : "", 2333 (tmp & DEV_IDLE_EN) ? " !dev" : "",
2310 (tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active"); 2334 (tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active");
2311 tmp = OTG_SYSCON_2_REG; 2335 tmp = omap_readl(OTG_SYSCON_2);
2312 seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS 2336 seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS
2313 " b_ase_brst=%d hmc=%d\n", tmp, 2337 " b_ase_brst=%d hmc=%d\n", tmp,
2314 (tmp & OTG_EN) ? " otg_en" : "", 2338 (tmp & OTG_EN) ? " otg_en" : "",
@@ -2323,7 +2347,7 @@ static int proc_otg_show(struct seq_file *s)
2323 (tmp & HMC_TLLATTACH) ? " tllattach" : "", 2347 (tmp & HMC_TLLATTACH) ? " tllattach" : "",
2324 B_ASE_BRST(tmp), 2348 B_ASE_BRST(tmp),
2325 OTG_HMC(tmp)); 2349 OTG_HMC(tmp));
2326 tmp = OTG_CTRL_REG; 2350 tmp = omap_readl(OTG_CTRL);
2327 seq_printf(s, "otg_ctrl %06x" EIGHTBITS EIGHTBITS "%s\n", tmp, 2351 seq_printf(s, "otg_ctrl %06x" EIGHTBITS EIGHTBITS "%s\n", tmp,
2328 (tmp & OTG_ASESSVLD) ? " asess" : "", 2352 (tmp & OTG_ASESSVLD) ? " asess" : "",
2329 (tmp & OTG_BSESSEND) ? " bsess_end" : "", 2353 (tmp & OTG_BSESSEND) ? " bsess_end" : "",
@@ -2343,13 +2367,13 @@ static int proc_otg_show(struct seq_file *s)
2343 (tmp & OTG_PU_VBUS) ? " pu_vb" : "", 2367 (tmp & OTG_PU_VBUS) ? " pu_vb" : "",
2344 (tmp & OTG_PU_ID) ? " pu_id" : "" 2368 (tmp & OTG_PU_ID) ? " pu_id" : ""
2345 ); 2369 );
2346 tmp = OTG_IRQ_EN_REG; 2370 tmp = omap_readw(OTG_IRQ_EN);
2347 seq_printf(s, "otg_irq_en %04x" "\n", tmp); 2371 seq_printf(s, "otg_irq_en %04x" "\n", tmp);
2348 tmp = OTG_IRQ_SRC_REG; 2372 tmp = omap_readw(OTG_IRQ_SRC);
2349 seq_printf(s, "otg_irq_src %04x" "\n", tmp); 2373 seq_printf(s, "otg_irq_src %04x" "\n", tmp);
2350 tmp = OTG_OUTCTRL_REG; 2374 tmp = omap_readw(OTG_OUTCTRL);
2351 seq_printf(s, "otg_outctrl %04x" "\n", tmp); 2375 seq_printf(s, "otg_outctrl %04x" "\n", tmp);
2352 tmp = OTG_TEST_REG; 2376 tmp = omap_readw(OTG_TEST);
2353 seq_printf(s, "otg_test %04x" "\n", tmp); 2377 seq_printf(s, "otg_test %04x" "\n", tmp);
2354 return 0; 2378 return 0;
2355} 2379}
@@ -2370,7 +2394,7 @@ static int proc_udc_show(struct seq_file *s, void *_)
2370 driver_desc, 2394 driver_desc,
2371 use_dma ? " (dma)" : ""); 2395 use_dma ? " (dma)" : "");
2372 2396
2373 tmp = UDC_REV_REG & 0xff; 2397 tmp = omap_readw(UDC_REV) & 0xff;
2374 seq_printf(s, 2398 seq_printf(s,
2375 "UDC rev %d.%d, fifo mode %d, gadget %s\n" 2399 "UDC rev %d.%d, fifo mode %d, gadget %s\n"
2376 "hmc %d, transceiver %s\n", 2400 "hmc %d, transceiver %s\n",
@@ -2384,16 +2408,16 @@ static int proc_udc_show(struct seq_file *s, void *_)
2384 ? "external" : "(none)")); 2408 ? "external" : "(none)"));
2385 if (cpu_class_is_omap1()) { 2409 if (cpu_class_is_omap1()) {
2386 seq_printf(s, "ULPD control %04x req %04x status %04x\n", 2410 seq_printf(s, "ULPD control %04x req %04x status %04x\n",
2387 __REG16(ULPD_CLOCK_CTRL), 2411 omap_readw(ULPD_CLOCK_CTRL),
2388 __REG16(ULPD_SOFT_REQ), 2412 omap_readw(ULPD_SOFT_REQ),
2389 __REG16(ULPD_STATUS_REQ)); 2413 omap_readw(ULPD_STATUS_REQ));
2390 } 2414 }
2391 2415
2392 /* OTG controller registers */ 2416 /* OTG controller registers */
2393 if (!cpu_is_omap15xx()) 2417 if (!cpu_is_omap15xx())
2394 proc_otg_show(s); 2418 proc_otg_show(s);
2395 2419
2396 tmp = UDC_SYSCON1_REG; 2420 tmp = omap_readw(UDC_SYSCON1);
2397 seq_printf(s, "\nsyscon1 %04x" EIGHTBITS "\n", tmp, 2421 seq_printf(s, "\nsyscon1 %04x" EIGHTBITS "\n", tmp,
2398 (tmp & UDC_CFG_LOCK) ? " cfg_lock" : "", 2422 (tmp & UDC_CFG_LOCK) ? " cfg_lock" : "",
2399 (tmp & UDC_DATA_ENDIAN) ? " data_endian" : "", 2423 (tmp & UDC_DATA_ENDIAN) ? " data_endian" : "",
@@ -2412,7 +2436,7 @@ static int proc_udc_show(struct seq_file *s, void *_)
2412 return 0; 2436 return 0;
2413 } 2437 }
2414 2438
2415 tmp = UDC_DEVSTAT_REG; 2439 tmp = omap_readw(UDC_DEVSTAT);
2416 seq_printf(s, "devstat %04x" EIGHTBITS "%s%s\n", tmp, 2440 seq_printf(s, "devstat %04x" EIGHTBITS "%s%s\n", tmp,
2417 (tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "", 2441 (tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "",
2418 (tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "", 2442 (tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "",
@@ -2424,20 +2448,20 @@ static int proc_udc_show(struct seq_file *s, void *_)
2424 (tmp & UDC_ADD) ? " ADD" : "", 2448 (tmp & UDC_ADD) ? " ADD" : "",
2425 (tmp & UDC_DEF) ? " DEF" : "", 2449 (tmp & UDC_DEF) ? " DEF" : "",
2426 (tmp & UDC_ATT) ? " ATT" : ""); 2450 (tmp & UDC_ATT) ? " ATT" : "");
2427 seq_printf(s, "sof %04x\n", UDC_SOF_REG); 2451 seq_printf(s, "sof %04x\n", omap_readw(UDC_SOF));
2428 tmp = UDC_IRQ_EN_REG; 2452 tmp = omap_readw(UDC_IRQ_EN);
2429 seq_printf(s, "irq_en %04x" FOURBITS "%s\n", tmp, 2453 seq_printf(s, "irq_en %04x" FOURBITS "%s\n", tmp,
2430 (tmp & UDC_SOF_IE) ? " sof" : "", 2454 (tmp & UDC_SOF_IE) ? " sof" : "",
2431 (tmp & UDC_EPN_RX_IE) ? " epn_rx" : "", 2455 (tmp & UDC_EPN_RX_IE) ? " epn_rx" : "",
2432 (tmp & UDC_EPN_TX_IE) ? " epn_tx" : "", 2456 (tmp & UDC_EPN_TX_IE) ? " epn_tx" : "",
2433 (tmp & UDC_DS_CHG_IE) ? " ds_chg" : "", 2457 (tmp & UDC_DS_CHG_IE) ? " ds_chg" : "",
2434 (tmp & UDC_EP0_IE) ? " ep0" : ""); 2458 (tmp & UDC_EP0_IE) ? " ep0" : "");
2435 tmp = UDC_IRQ_SRC_REG; 2459 tmp = omap_readw(UDC_IRQ_SRC);
2436 seq_printf(s, "irq_src %04x" EIGHTBITS "%s%s\n", tmp, 2460 seq_printf(s, "irq_src %04x" EIGHTBITS "%s%s\n", tmp,
2437 (tmp & UDC_TXN_DONE) ? " txn_done" : "", 2461 (tmp & UDC_TXN_DONE) ? " txn_done" : "",
2438 (tmp & UDC_RXN_CNT) ? " rxn_cnt" : "", 2462 (tmp & UDC_RXN_CNT) ? " rxn_cnt" : "",
2439 (tmp & UDC_RXN_EOT) ? " rxn_eot" : "", 2463 (tmp & UDC_RXN_EOT) ? " rxn_eot" : "",
2440 (tmp & UDC_SOF) ? " sof" : "", 2464 (tmp & UDC_IRQ_SOF) ? " sof" : "",
2441 (tmp & UDC_EPN_RX) ? " epn_rx" : "", 2465 (tmp & UDC_EPN_RX) ? " epn_rx" : "",
2442 (tmp & UDC_EPN_TX) ? " epn_tx" : "", 2466 (tmp & UDC_EPN_TX) ? " epn_tx" : "",
2443 (tmp & UDC_DS_CHG) ? " ds_chg" : "", 2467 (tmp & UDC_DS_CHG) ? " ds_chg" : "",
@@ -2447,7 +2471,7 @@ static int proc_udc_show(struct seq_file *s, void *_)
2447 if (use_dma) { 2471 if (use_dma) {
2448 unsigned i; 2472 unsigned i;
2449 2473
2450 tmp = UDC_DMA_IRQ_EN_REG; 2474 tmp = omap_readw(UDC_DMA_IRQ_EN);
2451 seq_printf(s, "dma_irq_en %04x%s" EIGHTBITS "\n", tmp, 2475 seq_printf(s, "dma_irq_en %04x%s" EIGHTBITS "\n", tmp,
2452 (tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "", 2476 (tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "",
2453 (tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "", 2477 (tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "",
@@ -2461,29 +2485,29 @@ static int proc_udc_show(struct seq_file *s, void *_)
2461 (tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "", 2485 (tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "",
2462 (tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : ""); 2486 (tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : "");
2463 2487
2464 tmp = UDC_RXDMA_CFG_REG; 2488 tmp = omap_readw(UDC_RXDMA_CFG);
2465 seq_printf(s, "rxdma_cfg %04x\n", tmp); 2489 seq_printf(s, "rxdma_cfg %04x\n", tmp);
2466 if (tmp) { 2490 if (tmp) {
2467 for (i = 0; i < 3; i++) { 2491 for (i = 0; i < 3; i++) {
2468 if ((tmp & (0x0f << (i * 4))) == 0) 2492 if ((tmp & (0x0f << (i * 4))) == 0)
2469 continue; 2493 continue;
2470 seq_printf(s, "rxdma[%d] %04x\n", i, 2494 seq_printf(s, "rxdma[%d] %04x\n", i,
2471 UDC_RXDMA_REG(i + 1)); 2495 omap_readw(UDC_RXDMA(i + 1)));
2472 } 2496 }
2473 } 2497 }
2474 tmp = UDC_TXDMA_CFG_REG; 2498 tmp = omap_readw(UDC_TXDMA_CFG);
2475 seq_printf(s, "txdma_cfg %04x\n", tmp); 2499 seq_printf(s, "txdma_cfg %04x\n", tmp);
2476 if (tmp) { 2500 if (tmp) {
2477 for (i = 0; i < 3; i++) { 2501 for (i = 0; i < 3; i++) {
2478 if (!(tmp & (0x0f << (i * 4)))) 2502 if (!(tmp & (0x0f << (i * 4))))
2479 continue; 2503 continue;
2480 seq_printf(s, "txdma[%d] %04x\n", i, 2504 seq_printf(s, "txdma[%d] %04x\n", i,
2481 UDC_TXDMA_REG(i + 1)); 2505 omap_readw(UDC_TXDMA(i + 1)));
2482 } 2506 }
2483 } 2507 }
2484 } 2508 }
2485 2509
2486 tmp = UDC_DEVSTAT_REG; 2510 tmp = omap_readw(UDC_DEVSTAT);
2487 if (tmp & UDC_ATT) { 2511 if (tmp & UDC_ATT) {
2488 proc_ep_show(s, &udc->ep[0]); 2512 proc_ep_show(s, &udc->ep[0]);
2489 if (tmp & UDC_ADD) { 2513 if (tmp & UDC_ADD) {
@@ -2535,7 +2559,7 @@ static inline void remove_proc_file(void) {}
2535 * buffer space among the endpoints we'll be operating. 2559 * buffer space among the endpoints we'll be operating.
2536 * 2560 *
2537 * NOTE: as of OMAP 1710 ES2.0, writing a new endpoint config when 2561 * NOTE: as of OMAP 1710 ES2.0, writing a new endpoint config when
2538 * UDC_SYSCON_1_REG.CFG_LOCK is set can now work. We won't use that 2562 * UDC_SYSCON_1.CFG_LOCK is set can now work. We won't use that
2539 * capability yet though. 2563 * capability yet though.
2540 */ 2564 */
2541static unsigned __init 2565static unsigned __init
@@ -2597,9 +2621,9 @@ omap_ep_setup(char *name, u8 addr, u8 type,
2597 name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf); 2621 name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf);
2598 2622
2599 if (addr & USB_DIR_IN) 2623 if (addr & USB_DIR_IN)
2600 UDC_EP_TX_REG(addr & 0xf) = epn_rxtx; 2624 omap_writew(epn_rxtx, UDC_EP_TX(addr & 0xf));
2601 else 2625 else
2602 UDC_EP_RX_REG(addr) = epn_rxtx; 2626 omap_writew(epn_rxtx, UDC_EP_RX(addr));
2603 2627
2604 /* next endpoint's buffer starts after this one's */ 2628 /* next endpoint's buffer starts after this one's */
2605 buf += maxp; 2629 buf += maxp;
@@ -2638,15 +2662,15 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
2638 unsigned tmp, buf; 2662 unsigned tmp, buf;
2639 2663
2640 /* abolish any previous hardware state */ 2664 /* abolish any previous hardware state */
2641 UDC_SYSCON1_REG = 0; 2665 omap_writew(0, UDC_SYSCON1);
2642 UDC_IRQ_EN_REG = 0; 2666 omap_writew(0, UDC_IRQ_EN);
2643 UDC_IRQ_SRC_REG = UDC_IRQ_SRC_MASK; 2667 omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
2644 UDC_DMA_IRQ_EN_REG = 0; 2668 omap_writew(0, UDC_DMA_IRQ_EN);
2645 UDC_RXDMA_CFG_REG = 0; 2669 omap_writew(0, UDC_RXDMA_CFG);
2646 UDC_TXDMA_CFG_REG = 0; 2670 omap_writew(0, UDC_TXDMA_CFG);
2647 2671
2648 /* UDC_PULLUP_EN gates the chip clock */ 2672 /* UDC_PULLUP_EN gates the chip clock */
2649 // OTG_SYSCON_1_REG |= DEV_IDLE_EN; 2673 // OTG_SYSCON_1 |= DEV_IDLE_EN;
2650 2674
2651 udc = kzalloc(sizeof(*udc), GFP_KERNEL); 2675 udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2652 if (!udc) 2676 if (!udc)
@@ -2677,8 +2701,8 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
2677 2701
2678 /* initially disable all non-ep0 endpoints */ 2702 /* initially disable all non-ep0 endpoints */
2679 for (tmp = 1; tmp < 15; tmp++) { 2703 for (tmp = 1; tmp < 15; tmp++) {
2680 UDC_EP_RX_REG(tmp) = 0; 2704 omap_writew(0, UDC_EP_RX(tmp));
2681 UDC_EP_TX_REG(tmp) = 0; 2705 omap_writew(0, UDC_EP_TX(tmp));
2682 } 2706 }
2683 2707
2684#define OMAP_BULK_EP(name,addr) \ 2708#define OMAP_BULK_EP(name,addr) \
@@ -2763,7 +2787,7 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
2763 ERR("unsupported fifo_mode #%d\n", fifo_mode); 2787 ERR("unsupported fifo_mode #%d\n", fifo_mode);
2764 return -ENODEV; 2788 return -ENODEV;
2765 } 2789 }
2766 UDC_SYSCON1_REG = UDC_CFG_LOCK|UDC_SELF_PWR; 2790 omap_writew(UDC_CFG_LOCK|UDC_SELF_PWR, UDC_SYSCON1);
2767 INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf); 2791 INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf);
2768 return 0; 2792 return 0;
2769} 2793}
@@ -2807,7 +2831,7 @@ static int __init omap_udc_probe(struct platform_device *pdev)
2807 } 2831 }
2808 2832
2809 INFO("OMAP UDC rev %d.%d%s\n", 2833 INFO("OMAP UDC rev %d.%d%s\n",
2810 UDC_REV_REG >> 4, UDC_REV_REG & 0xf, 2834 omap_readw(UDC_REV) >> 4, omap_readw(UDC_REV) & 0xf,
2811 config->otg ? ", Mini-AB" : ""); 2835 config->otg ? ", Mini-AB" : "");
2812 2836
2813 /* use the mode given to us by board init code */ 2837 /* use the mode given to us by board init code */
@@ -2822,12 +2846,12 @@ static int __init omap_udc_probe(struct platform_device *pdev)
2822 * know when to turn PULLUP_EN on/off; and that 2846 * know when to turn PULLUP_EN on/off; and that
2823 * means we always "need" the 48MHz clock. 2847 * means we always "need" the 48MHz clock.
2824 */ 2848 */
2825 u32 tmp = FUNC_MUX_CTRL_0_REG; 2849 u32 tmp = omap_readl(FUNC_MUX_CTRL_0);
2826 2850 tmp &= ~VBUS_CTRL_1510;
2827 FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510; 2851 omap_writel(tmp, FUNC_MUX_CTRL_0);
2828 tmp |= VBUS_MODE_1510; 2852 tmp |= VBUS_MODE_1510;
2829 tmp &= ~VBUS_CTRL_1510; 2853 tmp &= ~VBUS_CTRL_1510;
2830 FUNC_MUX_CTRL_0_REG = tmp; 2854 omap_writel(tmp, FUNC_MUX_CTRL_0);
2831 } 2855 }
2832 } else { 2856 } else {
2833 /* The transceiver may package some GPIO logic or handle 2857 /* The transceiver may package some GPIO logic or handle
@@ -2907,7 +2931,7 @@ known:
2907#endif 2931#endif
2908 2932
2909 /* starting with omap1710 es2.0, clear toggle is a separate bit */ 2933 /* starting with omap1710 es2.0, clear toggle is a separate bit */
2910 if (UDC_REV_REG >= 0x61) 2934 if (omap_readw(UDC_REV) >= 0x61)
2911 udc->clr_halt = UDC_RESET_EP | UDC_CLRDATA_TOGGLE; 2935 udc->clr_halt = UDC_RESET_EP | UDC_CLRDATA_TOGGLE;
2912 else 2936 else
2913 udc->clr_halt = UDC_RESET_EP; 2937 udc->clr_halt = UDC_RESET_EP;
@@ -3005,7 +3029,7 @@ static int __exit omap_udc_remove(struct platform_device *pdev)
3005 put_device(udc->transceiver->dev); 3029 put_device(udc->transceiver->dev);
3006 udc->transceiver = NULL; 3030 udc->transceiver = NULL;
3007 } 3031 }
3008 UDC_SYSCON1_REG = 0; 3032 omap_writew(0, UDC_SYSCON1);
3009 3033
3010 remove_proc_file(); 3034 remove_proc_file();
3011 3035
@@ -3036,7 +3060,7 @@ static int __exit omap_udc_remove(struct platform_device *pdev)
3036 * 3060 *
3037 * REVISIT we should probably reject suspend requests when there's a host 3061 * REVISIT we should probably reject suspend requests when there's a host
3038 * session active, rather than disconnecting, at least on boards that can 3062 * session active, rather than disconnecting, at least on boards that can
3039 * report VBUS irqs (UDC_DEVSTAT_REG.UDC_ATT). And in any case, we need to 3063 * report VBUS irqs (UDC_DEVSTAT.UDC_ATT). And in any case, we need to
3040 * make host resumes and VBUS detection trigger OMAP wakeup events; that 3064 * make host resumes and VBUS detection trigger OMAP wakeup events; that
3041 * may involve talking to an external transceiver (e.g. isp1301). 3065 * may involve talking to an external transceiver (e.g. isp1301).
3042 */ 3066 */
@@ -3045,7 +3069,7 @@ static int omap_udc_suspend(struct platform_device *dev, pm_message_t message)
3045{ 3069{
3046 u32 devstat; 3070 u32 devstat;
3047 3071
3048 devstat = UDC_DEVSTAT_REG; 3072 devstat = omap_readw(UDC_DEVSTAT);
3049 3073
3050 /* we're requesting 48 MHz clock if the pullup is enabled 3074 /* we're requesting 48 MHz clock if the pullup is enabled
3051 * (== we're attached to the host) and we're not suspended, 3075 * (== we're attached to the host) and we're not suspended,
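
The omap_udc.c hunks above all apply one mechanical conversion: direct dereferences of the old register macros (reads such as devstat = UDC_DEVSTAT_REG and writes such as UDC_SYSCON1_REG |= UDC_PULLUP_EN) become explicit omap_readw()/omap_writew() calls, with omap_readl()/omap_writel() for the 32-bit OTG and mux registers, so every read-modify-write is spelled out as a separate read, modify and write. A minimal sketch of that pattern, reusing the register names from omap_udc.h; the helper itself is hypothetical and not part of the patch:

static inline void udc_set_bits_w(u32 reg, u16 bits)
{
	u16 w;

	w = omap_readw(reg);	/* read the current 16-bit register value */
	w |= bits;		/* set only the requested bits */
	omap_writew(w, reg);	/* write the result back */
}

/* e.g. the pullup_enable() change amounts to:
 *	udc_set_bits_w(UDC_SYSCON1, UDC_PULLUP_EN);
 */
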
diff --git a/drivers/usb/gadget/omap_udc.h b/drivers/usb/gadget/omap_udc.h
index c6b9cbc7230a..8522bbb12278 100644
--- a/drivers/usb/gadget/omap_udc.h
+++ b/drivers/usb/gadget/omap_udc.h
@@ -8,23 +8,22 @@
8/* 8/*
9 * USB device/endpoint management registers 9 * USB device/endpoint management registers
10 */ 10 */
11#define UDC_REG(offset) __REG16(UDC_BASE + (offset))
12 11
13#define UDC_REV_REG UDC_REG(0x0) /* Revision */ 12#define UDC_REV (UDC_BASE + 0x0) /* Revision */
14#define UDC_EP_NUM_REG UDC_REG(0x4) /* Which endpoint */ 13#define UDC_EP_NUM (UDC_BASE + 0x4) /* Which endpoint */
15# define UDC_SETUP_SEL (1 << 6) 14# define UDC_SETUP_SEL (1 << 6)
16# define UDC_EP_SEL (1 << 5) 15# define UDC_EP_SEL (1 << 5)
17# define UDC_EP_DIR (1 << 4) 16# define UDC_EP_DIR (1 << 4)
18 /* low 4 bits for endpoint number */ 17 /* low 4 bits for endpoint number */
19#define UDC_DATA_REG UDC_REG(0x08) /* Endpoint FIFO */ 18#define UDC_DATA (UDC_BASE + 0x08) /* Endpoint FIFO */
20#define UDC_CTRL_REG UDC_REG(0x0C) /* Endpoint control */ 19#define UDC_CTRL (UDC_BASE + 0x0C) /* Endpoint control */
21# define UDC_CLR_HALT (1 << 7) 20# define UDC_CLR_HALT (1 << 7)
22# define UDC_SET_HALT (1 << 6) 21# define UDC_SET_HALT (1 << 6)
23# define UDC_CLRDATA_TOGGLE (1 << 3) 22# define UDC_CLRDATA_TOGGLE (1 << 3)
24# define UDC_SET_FIFO_EN (1 << 2) 23# define UDC_SET_FIFO_EN (1 << 2)
25# define UDC_CLR_EP (1 << 1) 24# define UDC_CLR_EP (1 << 1)
26# define UDC_RESET_EP (1 << 0) 25# define UDC_RESET_EP (1 << 0)
27#define UDC_STAT_FLG_REG UDC_REG(0x10) /* Endpoint status */ 26#define UDC_STAT_FLG (UDC_BASE + 0x10) /* Endpoint status */
28# define UDC_NO_RXPACKET (1 << 15) 27# define UDC_NO_RXPACKET (1 << 15)
29# define UDC_MISS_IN (1 << 14) 28# define UDC_MISS_IN (1 << 14)
30# define UDC_DATA_FLUSH (1 << 13) 29# define UDC_DATA_FLUSH (1 << 13)
@@ -38,8 +37,8 @@
38# define UDC_FIFO_EN (1 << 2) 37# define UDC_FIFO_EN (1 << 2)
39# define UDC_NON_ISO_FIFO_EMPTY (1 << 1) 38# define UDC_NON_ISO_FIFO_EMPTY (1 << 1)
40# define UDC_NON_ISO_FIFO_FULL (1 << 0) 39# define UDC_NON_ISO_FIFO_FULL (1 << 0)
41#define UDC_RXFSTAT_REG UDC_REG(0x14) /* OUT bytecount */ 40#define UDC_RXFSTAT (UDC_BASE + 0x14) /* OUT bytecount */
42#define UDC_SYSCON1_REG UDC_REG(0x18) /* System config 1 */ 41#define UDC_SYSCON1 (UDC_BASE + 0x18) /* System config 1 */
43# define UDC_CFG_LOCK (1 << 8) 42# define UDC_CFG_LOCK (1 << 8)
44# define UDC_DATA_ENDIAN (1 << 7) 43# define UDC_DATA_ENDIAN (1 << 7)
45# define UDC_DMA_ENDIAN (1 << 6) 44# define UDC_DMA_ENDIAN (1 << 6)
@@ -48,12 +47,12 @@
48# define UDC_SELF_PWR (1 << 2) 47# define UDC_SELF_PWR (1 << 2)
49# define UDC_SOFF_DIS (1 << 1) 48# define UDC_SOFF_DIS (1 << 1)
50# define UDC_PULLUP_EN (1 << 0) 49# define UDC_PULLUP_EN (1 << 0)
51#define UDC_SYSCON2_REG UDC_REG(0x1C) /* System config 2 */ 50#define UDC_SYSCON2 (UDC_BASE + 0x1C) /* System config 2 */
52# define UDC_RMT_WKP (1 << 6) 51# define UDC_RMT_WKP (1 << 6)
53# define UDC_STALL_CMD (1 << 5) 52# define UDC_STALL_CMD (1 << 5)
54# define UDC_DEV_CFG (1 << 3) 53# define UDC_DEV_CFG (1 << 3)
55# define UDC_CLR_CFG (1 << 2) 54# define UDC_CLR_CFG (1 << 2)
56#define UDC_DEVSTAT_REG UDC_REG(0x20) /* Device status */ 55#define UDC_DEVSTAT (UDC_BASE + 0x20) /* Device status */
57# define UDC_B_HNP_ENABLE (1 << 9) 56# define UDC_B_HNP_ENABLE (1 << 9)
58# define UDC_A_HNP_SUPPORT (1 << 8) 57# define UDC_A_HNP_SUPPORT (1 << 8)
59# define UDC_A_ALT_HNP_SUPPORT (1 << 7) 58# define UDC_A_ALT_HNP_SUPPORT (1 << 7)
@@ -64,26 +63,26 @@
64# define UDC_ADD (1 << 2) 63# define UDC_ADD (1 << 2)
65# define UDC_DEF (1 << 1) 64# define UDC_DEF (1 << 1)
66# define UDC_ATT (1 << 0) 65# define UDC_ATT (1 << 0)
67#define UDC_SOF_REG UDC_REG(0x24) /* Start of frame */ 66#define UDC_SOF (UDC_BASE + 0x24) /* Start of frame */
68# define UDC_FT_LOCK (1 << 12) 67# define UDC_FT_LOCK (1 << 12)
69# define UDC_TS_OK (1 << 11) 68# define UDC_TS_OK (1 << 11)
70# define UDC_TS 0x03ff 69# define UDC_TS 0x03ff
71#define UDC_IRQ_EN_REG UDC_REG(0x28) /* Interrupt enable */ 70#define UDC_IRQ_EN (UDC_BASE + 0x28) /* Interrupt enable */
72# define UDC_SOF_IE (1 << 7) 71# define UDC_SOF_IE (1 << 7)
73# define UDC_EPN_RX_IE (1 << 5) 72# define UDC_EPN_RX_IE (1 << 5)
74# define UDC_EPN_TX_IE (1 << 4) 73# define UDC_EPN_TX_IE (1 << 4)
75# define UDC_DS_CHG_IE (1 << 3) 74# define UDC_DS_CHG_IE (1 << 3)
76# define UDC_EP0_IE (1 << 0) 75# define UDC_EP0_IE (1 << 0)
77#define UDC_DMA_IRQ_EN_REG UDC_REG(0x2C) /* DMA irq enable */ 76#define UDC_DMA_IRQ_EN (UDC_BASE + 0x2C) /* DMA irq enable */
78 /* rx/tx dma channels numbered 1-3 not 0-2 */ 77 /* rx/tx dma channels numbered 1-3 not 0-2 */
79# define UDC_TX_DONE_IE(n) (1 << (4 * (n) - 2)) 78# define UDC_TX_DONE_IE(n) (1 << (4 * (n) - 2))
80# define UDC_RX_CNT_IE(n) (1 << (4 * (n) - 3)) 79# define UDC_RX_CNT_IE(n) (1 << (4 * (n) - 3))
81# define UDC_RX_EOT_IE(n) (1 << (4 * (n) - 4)) 80# define UDC_RX_EOT_IE(n) (1 << (4 * (n) - 4))
82#define UDC_IRQ_SRC_REG UDC_REG(0x30) /* Interrupt source */ 81#define UDC_IRQ_SRC (UDC_BASE + 0x30) /* Interrupt source */
83# define UDC_TXN_DONE (1 << 10) 82# define UDC_TXN_DONE (1 << 10)
84# define UDC_RXN_CNT (1 << 9) 83# define UDC_RXN_CNT (1 << 9)
85# define UDC_RXN_EOT (1 << 8) 84# define UDC_RXN_EOT (1 << 8)
86# define UDC_SOF (1 << 7) 85# define UDC_IRQ_SOF (1 << 7)
87# define UDC_EPN_RX (1 << 5) 86# define UDC_EPN_RX (1 << 5)
88# define UDC_EPN_TX (1 << 4) 87# define UDC_EPN_TX (1 << 4)
89# define UDC_DS_CHG (1 << 3) 88# define UDC_DS_CHG (1 << 3)
@@ -91,41 +90,41 @@
91# define UDC_EP0_RX (1 << 1) 90# define UDC_EP0_RX (1 << 1)
92# define UDC_EP0_TX (1 << 0) 91# define UDC_EP0_TX (1 << 0)
93# define UDC_IRQ_SRC_MASK 0x7bf 92# define UDC_IRQ_SRC_MASK 0x7bf
94#define UDC_EPN_STAT_REG UDC_REG(0x34) /* EP irq status */ 93#define UDC_EPN_STAT (UDC_BASE + 0x34) /* EP irq status */
95#define UDC_DMAN_STAT_REG UDC_REG(0x38) /* DMA irq status */ 94#define UDC_DMAN_STAT (UDC_BASE + 0x38) /* DMA irq status */
96# define UDC_DMA_RX_SB (1 << 12) 95# define UDC_DMA_RX_SB (1 << 12)
97# define UDC_DMA_RX_SRC(x) (((x)>>8) & 0xf) 96# define UDC_DMA_RX_SRC(x) (((x)>>8) & 0xf)
98# define UDC_DMA_TX_SRC(x) (((x)>>0) & 0xf) 97# define UDC_DMA_TX_SRC(x) (((x)>>0) & 0xf)
99 98
100 99
101/* DMA configuration registers: up to three channels in each direction. */ 100/* DMA configuration registers: up to three channels in each direction. */
102#define UDC_RXDMA_CFG_REG UDC_REG(0x40) /* 3 eps for RX DMA */ 101#define UDC_RXDMA_CFG (UDC_BASE + 0x40) /* 3 eps for RX DMA */
103# define UDC_DMA_REQ (1 << 12) 102# define UDC_DMA_REQ (1 << 12)
104#define UDC_TXDMA_CFG_REG UDC_REG(0x44) /* 3 eps for TX DMA */ 103#define UDC_TXDMA_CFG (UDC_BASE + 0x44) /* 3 eps for TX DMA */
105#define UDC_DATA_DMA_REG UDC_REG(0x48) /* rx/tx fifo addr */ 104#define UDC_DATA_DMA (UDC_BASE + 0x48) /* rx/tx fifo addr */
106 105
107/* rx/tx dma control, numbering channels 1-3 not 0-2 */ 106/* rx/tx dma control, numbering channels 1-3 not 0-2 */
108#define UDC_TXDMA_REG(chan) UDC_REG(0x50 - 4 + 4 * (chan)) 107#define UDC_TXDMA(chan) (UDC_BASE + 0x50 - 4 + 4 * (chan))
109# define UDC_TXN_EOT (1 << 15) /* bytes vs packets */ 108# define UDC_TXN_EOT (1 << 15) /* bytes vs packets */
110# define UDC_TXN_START (1 << 14) /* start transfer */ 109# define UDC_TXN_START (1 << 14) /* start transfer */
111# define UDC_TXN_TSC 0x03ff /* units in xfer */ 110# define UDC_TXN_TSC 0x03ff /* units in xfer */
112#define UDC_RXDMA_REG(chan) UDC_REG(0x60 - 4 + 4 * (chan)) 111#define UDC_RXDMA(chan) (UDC_BASE + 0x60 - 4 + 4 * (chan))
113# define UDC_RXN_STOP (1 << 15) /* enable EOT irq */ 112# define UDC_RXN_STOP (1 << 15) /* enable EOT irq */
114# define UDC_RXN_TC 0x00ff /* packets in xfer */ 113# define UDC_RXN_TC 0x00ff /* packets in xfer */
115 114
116 115
117/* 116/*
118 * Endpoint configuration registers (used before CFG_LOCK is set) 117 * Endpoint configuration registers (used before CFG_LOCK is set)
119 * UDC_EP_TX_REG(0) is unused 118 * UDC_EP_TX(0) is unused
120 */ 119 */
121#define UDC_EP_RX_REG(endpoint) UDC_REG(0x80 + (endpoint)*4) 120#define UDC_EP_RX(endpoint) (UDC_BASE + 0x80 + (endpoint)*4)
122# define UDC_EPN_RX_VALID (1 << 15) 121# define UDC_EPN_RX_VALID (1 << 15)
123# define UDC_EPN_RX_DB (1 << 14) 122# define UDC_EPN_RX_DB (1 << 14)
124 /* buffer size in bits 13, 12 */ 123 /* buffer size in bits 13, 12 */
125# define UDC_EPN_RX_ISO (1 << 11) 124# define UDC_EPN_RX_ISO (1 << 11)
126 /* buffer pointer in low 11 bits */ 125 /* buffer pointer in low 11 bits */
127#define UDC_EP_TX_REG(endpoint) UDC_REG(0xc0 + (endpoint)*4) 126#define UDC_EP_TX(endpoint) (UDC_BASE + 0xc0 + (endpoint)*4)
128 /* same bitfields as in RX_REG */ 127 /* same bitfields as in RX */
129 128
130/*-------------------------------------------------------------------------*/ 129/*-------------------------------------------------------------------------*/
131 130
@@ -195,14 +194,14 @@ struct omap_udc {
195 194
196/*-------------------------------------------------------------------------*/ 195/*-------------------------------------------------------------------------*/
197 196
198#define MOD_CONF_CTRL_0_REG __REG32(MOD_CONF_CTRL_0) 197/* MOD_CONF_CTRL_0 */
199#define VBUS_W2FC_1510 (1 << 17) /* 0 gpio0, 1 dvdd2 pin */ 198#define VBUS_W2FC_1510 (1 << 17) /* 0 gpio0, 1 dvdd2 pin */
200 199
201#define FUNC_MUX_CTRL_0_REG __REG32(FUNC_MUX_CTRL_0) 200/* FUNC_MUX_CTRL_0 */
202#define VBUS_CTRL_1510 (1 << 19) /* 1 connected (software) */ 201#define VBUS_CTRL_1510 (1 << 19) /* 1 connected (software) */
203#define VBUS_MODE_1510 (1 << 18) /* 0 hardware, 1 software */ 202#define VBUS_MODE_1510 (1 << 18) /* 0 hardware, 1 software */
204 203
205#define HMC_1510 ((MOD_CONF_CTRL_0_REG >> 1) & 0x3f) 204#define HMC_1510 ((omap_readl(MOD_CONF_CTRL_0) >> 1) & 0x3f)
206#define HMC_1610 (OTG_SYSCON_2_REG & 0x3f) 205#define HMC_1610 (omap_readl(OTG_SYSCON_2) & 0x3f)
207#define HMC (cpu_is_omap15xx() ? HMC_1510 : HMC_1610) 206#define HMC (cpu_is_omap15xx() ? HMC_1510 : HMC_1610)
208 207
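
The omap_udc.h diff is the other half of that conversion: the UDC_REG()/__REG16() wrappers are dropped, each register name now expands to a plain address (UDC_BASE + offset) that gets handed to the accessors, and the start-of-frame interrupt bit is renamed UDC_IRQ_SOF so it no longer clashes with the UDC_SOF register address. A hedged before/after sketch of what a caller looks like under the two schemes (illustrative only, not quoted from the tree):

/* old: the macro is a dereferenced MMIO lvalue, so plain assignment works
 * and hides the access width:
 *	UDC_SYSCON1_REG |= UDC_PULLUP_EN;
 *
 * new: the macro is only an address, and every access is an explicit call:
 */
u16 w = omap_readw(UDC_SYSCON1);
omap_writew(w | UDC_PULLUP_EN, UDC_SYSCON1);
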
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 76be75e3ab8f..ec8f2eb041ca 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -462,6 +462,7 @@ printer_open(struct inode *inode, struct file *fd)
462 unsigned long flags; 462 unsigned long flags;
463 int ret = -EBUSY; 463 int ret = -EBUSY;
464 464
465 lock_kernel();
465 dev = container_of(inode->i_cdev, struct printer_dev, printer_cdev); 466 dev = container_of(inode->i_cdev, struct printer_dev, printer_cdev);
466 467
467 spin_lock_irqsave(&dev->lock, flags); 468 spin_lock_irqsave(&dev->lock, flags);
@@ -477,7 +478,7 @@ printer_open(struct inode *inode, struct file *fd)
477 spin_unlock_irqrestore(&dev->lock, flags); 478 spin_unlock_irqrestore(&dev->lock, flags);
478 479
479 DBG(dev, "printer_open returned %x\n", ret); 480 DBG(dev, "printer_open returned %x\n", ret);
480 481 unlock_kernel();
481 return ret; 482 return ret;
482} 483}
483 484
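
The printer.c hunks are unrelated to the register rework: printer_open() now takes and releases the Big Kernel Lock itself, consistent with the BKL being pushed down from the generic chardev open path into individual drivers around this time. A rough sketch of the resulting shape, assuming the usual lock_kernel()/unlock_kernel() pair; any early-return path added later would also have to reach the unlock:

static int printer_open(struct inode *inode, struct file *fd)
{
	int ret = -EBUSY;

	lock_kernel();		/* BKL now taken explicitly by the driver */

	/* ... look up the device from inode->i_cdev and try to claim it
	 * under dev->lock, setting ret to 0 on success ...
	 */

	unlock_kernel();	/* released before every return */
	return ret;
}
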
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 08f699b1fc57..031dceb93023 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * linux/drivers/usb/gadget/pxa2xx_udc.c
3 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers 2 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
4 * 3 *
5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker) 4 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
@@ -46,19 +45,25 @@
46#include <linux/err.h> 45#include <linux/err.h>
47#include <linux/seq_file.h> 46#include <linux/seq_file.h>
48#include <linux/debugfs.h> 47#include <linux/debugfs.h>
48#include <linux/io.h>
49 49
50#include <asm/byteorder.h> 50#include <asm/byteorder.h>
51#include <asm/dma.h> 51#include <asm/dma.h>
52#include <asm/gpio.h> 52#include <asm/gpio.h>
53#include <asm/io.h>
54#include <asm/system.h> 53#include <asm/system.h>
55#include <asm/mach-types.h> 54#include <asm/mach-types.h>
56#include <asm/unaligned.h> 55#include <asm/unaligned.h>
57#include <asm/hardware.h>
58 56
59#include <linux/usb/ch9.h> 57#include <linux/usb/ch9.h>
60#include <linux/usb/gadget.h> 58#include <linux/usb/gadget.h>
61 59
60/*
61 * This driver is PXA25x only. Grab the right register definitions.
62 */
63#ifdef CONFIG_ARCH_PXA
64#include <asm/arch/pxa25x-udc.h>
65#endif
66
62#include <asm/mach/udc_pxa2xx.h> 67#include <asm/mach/udc_pxa2xx.h>
63 68
64 69
@@ -91,7 +96,7 @@
91#define DRIVER_DESC "PXA 25x USB Device Controller driver" 96#define DRIVER_DESC "PXA 25x USB Device Controller driver"
92 97
93 98
94static const char driver_name [] = "pxa2xx_udc"; 99static const char driver_name [] = "pxa25x_udc";
95 100
96static const char ep0name [] = "ep0"; 101static const char ep0name [] = "ep0";
97 102
@@ -111,10 +116,10 @@ static const char ep0name [] = "ep0";
111 116
112#endif 117#endif
113 118
114#include "pxa2xx_udc.h" 119#include "pxa25x_udc.h"
115 120
116 121
117#ifdef CONFIG_USB_PXA2XX_SMALL 122#ifdef CONFIG_USB_PXA25X_SMALL
118#define SIZE_STR " (small)" 123#define SIZE_STR " (small)"
119#else 124#else
120#define SIZE_STR "" 125#define SIZE_STR ""
@@ -126,8 +131,8 @@ static const char ep0name [] = "ep0";
126 * --------------------------------------------------------------------------- 131 * ---------------------------------------------------------------------------
127 */ 132 */
128 133
129static void pxa2xx_ep_fifo_flush (struct usb_ep *ep); 134static void pxa25x_ep_fifo_flush (struct usb_ep *ep);
130static void nuke (struct pxa2xx_ep *, int status); 135static void nuke (struct pxa25x_ep *, int status);
131 136
132/* one GPIO should be used to detect VBUS from the host */ 137/* one GPIO should be used to detect VBUS from the host */
133static int is_vbus_present(void) 138static int is_vbus_present(void)
@@ -212,24 +217,24 @@ static inline void udc_ack_int_UDCCR(int mask)
212/* 217/*
213 * endpoint enable/disable 218 * endpoint enable/disable
214 * 219 *
215 * we need to verify the descriptors used to enable endpoints. since pxa2xx 220 * we need to verify the descriptors used to enable endpoints. since pxa25x
216 * endpoint configurations are fixed, and are pretty much always enabled, 221 * endpoint configurations are fixed, and are pretty much always enabled,
217 * there's not a lot to manage here. 222 * there's not a lot to manage here.
218 * 223 *
219 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints, 224 * because pxa25x can't selectively initialize bulk (or interrupt) endpoints,
220 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except 225 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
221 * for a single interface (with only the default altsetting) and for gadget 226 * for a single interface (with only the default altsetting) and for gadget
222 * drivers that don't halt endpoints (not reset by set_interface). that also 227 * drivers that don't halt endpoints (not reset by set_interface). that also
223 * means that if you use ISO, you must violate the USB spec rule that all 228 * means that if you use ISO, you must violate the USB spec rule that all
224 * iso endpoints must be in non-default altsettings. 229 * iso endpoints must be in non-default altsettings.
225 */ 230 */
226static int pxa2xx_ep_enable (struct usb_ep *_ep, 231static int pxa25x_ep_enable (struct usb_ep *_ep,
227 const struct usb_endpoint_descriptor *desc) 232 const struct usb_endpoint_descriptor *desc)
228{ 233{
229 struct pxa2xx_ep *ep; 234 struct pxa25x_ep *ep;
230 struct pxa2xx_udc *dev; 235 struct pxa25x_udc *dev;
231 236
232 ep = container_of (_ep, struct pxa2xx_ep, ep); 237 ep = container_of (_ep, struct pxa25x_ep, ep);
233 if (!_ep || !desc || ep->desc || _ep->name == ep0name 238 if (!_ep || !desc || ep->desc || _ep->name == ep0name
234 || desc->bDescriptorType != USB_DT_ENDPOINT 239 || desc->bDescriptorType != USB_DT_ENDPOINT
235 || ep->bEndpointAddress != desc->bEndpointAddress 240 || ep->bEndpointAddress != desc->bEndpointAddress
@@ -268,7 +273,7 @@ static int pxa2xx_ep_enable (struct usb_ep *_ep,
268 ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize); 273 ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
269 274
270 /* flush fifo (mostly for OUT buffers) */ 275 /* flush fifo (mostly for OUT buffers) */
271 pxa2xx_ep_fifo_flush (_ep); 276 pxa25x_ep_fifo_flush (_ep);
272 277
273 /* ... reset halt state too, if we could ... */ 278 /* ... reset halt state too, if we could ... */
274 279
@@ -276,12 +281,12 @@ static int pxa2xx_ep_enable (struct usb_ep *_ep,
276 return 0; 281 return 0;
277} 282}
278 283
279static int pxa2xx_ep_disable (struct usb_ep *_ep) 284static int pxa25x_ep_disable (struct usb_ep *_ep)
280{ 285{
281 struct pxa2xx_ep *ep; 286 struct pxa25x_ep *ep;
282 unsigned long flags; 287 unsigned long flags;
283 288
284 ep = container_of (_ep, struct pxa2xx_ep, ep); 289 ep = container_of (_ep, struct pxa25x_ep, ep);
285 if (!_ep || !ep->desc) { 290 if (!_ep || !ep->desc) {
286 DMSG("%s, %s not enabled\n", __func__, 291 DMSG("%s, %s not enabled\n", __func__,
287 _ep ? ep->ep.name : NULL); 292 _ep ? ep->ep.name : NULL);
@@ -292,7 +297,7 @@ static int pxa2xx_ep_disable (struct usb_ep *_ep)
292 nuke (ep, -ESHUTDOWN); 297 nuke (ep, -ESHUTDOWN);
293 298
294 /* flush fifo (mostly for IN buffers) */ 299 /* flush fifo (mostly for IN buffers) */
295 pxa2xx_ep_fifo_flush (_ep); 300 pxa25x_ep_fifo_flush (_ep);
296 301
297 ep->desc = NULL; 302 ep->desc = NULL;
298 ep->stopped = 1; 303 ep->stopped = 1;
@@ -304,18 +309,18 @@ static int pxa2xx_ep_disable (struct usb_ep *_ep)
304 309
305/*-------------------------------------------------------------------------*/ 310/*-------------------------------------------------------------------------*/
306 311
307/* for the pxa2xx, these can just wrap kmalloc/kfree. gadget drivers 312/* for the pxa25x, these can just wrap kmalloc/kfree. gadget drivers
308 * must still pass correctly initialized endpoints, since other controller 313 * must still pass correctly initialized endpoints, since other controller
309 * drivers may care about how it's currently set up (dma issues etc). 314 * drivers may care about how it's currently set up (dma issues etc).
310 */ 315 */
311 316
312/* 317/*
313 * pxa2xx_ep_alloc_request - allocate a request data structure 318 * pxa25x_ep_alloc_request - allocate a request data structure
314 */ 319 */
315static struct usb_request * 320static struct usb_request *
316pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags) 321pxa25x_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
317{ 322{
318 struct pxa2xx_request *req; 323 struct pxa25x_request *req;
319 324
320 req = kzalloc(sizeof(*req), gfp_flags); 325 req = kzalloc(sizeof(*req), gfp_flags);
321 if (!req) 326 if (!req)
@@ -327,14 +332,14 @@ pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
327 332
328 333
329/* 334/*
330 * pxa2xx_ep_free_request - deallocate a request data structure 335 * pxa25x_ep_free_request - deallocate a request data structure
331 */ 336 */
332static void 337static void
333pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req) 338pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
334{ 339{
335 struct pxa2xx_request *req; 340 struct pxa25x_request *req;
336 341
337 req = container_of (_req, struct pxa2xx_request, req); 342 req = container_of (_req, struct pxa25x_request, req);
338 WARN_ON (!list_empty (&req->queue)); 343 WARN_ON (!list_empty (&req->queue));
339 kfree(req); 344 kfree(req);
340} 345}
@@ -344,7 +349,7 @@ pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
344/* 349/*
345 * done - retire a request; caller blocked irqs 350 * done - retire a request; caller blocked irqs
346 */ 351 */
347static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status) 352static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status)
348{ 353{
349 unsigned stopped = ep->stopped; 354 unsigned stopped = ep->stopped;
350 355
@@ -367,13 +372,13 @@ static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
367} 372}
368 373
369 374
370static inline void ep0_idle (struct pxa2xx_udc *dev) 375static inline void ep0_idle (struct pxa25x_udc *dev)
371{ 376{
372 dev->ep0state = EP0_IDLE; 377 dev->ep0state = EP0_IDLE;
373} 378}
374 379
375static int 380static int
376write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max) 381write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max)
377{ 382{
378 u8 *buf; 383 u8 *buf;
379 unsigned length, count; 384 unsigned length, count;
@@ -398,7 +403,7 @@ write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
398 * caller guarantees at least one packet buffer is ready (or a zlp). 403 * caller guarantees at least one packet buffer is ready (or a zlp).
399 */ 404 */
400static int 405static int
401write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req) 406write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
402{ 407{
403 unsigned max; 408 unsigned max;
404 409
@@ -455,7 +460,7 @@ write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
455 * ep0 data stage. these chips want very simple state transitions. 460 * ep0 data stage. these chips want very simple state transitions.
456 */ 461 */
457static inline 462static inline
458void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag) 463void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
459{ 464{
460 UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR; 465 UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
461 USIR0 = USIR0_IR0; 466 USIR0 = USIR0_IR0;
@@ -465,7 +470,7 @@ void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
465} 470}
466 471
467static int 472static int
468write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req) 473write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
469{ 474{
470 unsigned count; 475 unsigned count;
471 int is_short; 476 int is_short;
@@ -525,7 +530,7 @@ write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
525 * request buffer having filled (and maybe overran till end-of-packet). 530 * request buffer having filled (and maybe overran till end-of-packet).
526 */ 531 */
527static int 532static int
528read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req) 533read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
529{ 534{
530 for (;;) { 535 for (;;) {
531 u32 udccs; 536 u32 udccs;
@@ -602,7 +607,7 @@ read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
602 * protocols do use them. 607 * protocols do use them.
603 */ 608 */
604static int 609static int
605read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req) 610read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
606{ 611{
607 u8 *buf, byte; 612 u8 *buf, byte;
608 unsigned bufferspace; 613 unsigned bufferspace;
@@ -641,21 +646,21 @@ read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
641/*-------------------------------------------------------------------------*/ 646/*-------------------------------------------------------------------------*/
642 647
643static int 648static int
644pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 649pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
645{ 650{
646 struct pxa2xx_request *req; 651 struct pxa25x_request *req;
647 struct pxa2xx_ep *ep; 652 struct pxa25x_ep *ep;
648 struct pxa2xx_udc *dev; 653 struct pxa25x_udc *dev;
649 unsigned long flags; 654 unsigned long flags;
650 655
651 req = container_of(_req, struct pxa2xx_request, req); 656 req = container_of(_req, struct pxa25x_request, req);
652 if (unlikely (!_req || !_req->complete || !_req->buf 657 if (unlikely (!_req || !_req->complete || !_req->buf
653 || !list_empty(&req->queue))) { 658 || !list_empty(&req->queue))) {
654 DMSG("%s, bad params\n", __func__); 659 DMSG("%s, bad params\n", __func__);
655 return -EINVAL; 660 return -EINVAL;
656 } 661 }
657 662
658 ep = container_of(_ep, struct pxa2xx_ep, ep); 663 ep = container_of(_ep, struct pxa25x_ep, ep);
659 if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) { 664 if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
660 DMSG("%s, bad ep\n", __func__); 665 DMSG("%s, bad ep\n", __func__);
661 return -EINVAL; 666 return -EINVAL;
@@ -751,14 +756,14 @@ pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
751/* 756/*
752 * nuke - dequeue ALL requests 757 * nuke - dequeue ALL requests
753 */ 758 */
754static void nuke(struct pxa2xx_ep *ep, int status) 759static void nuke(struct pxa25x_ep *ep, int status)
755{ 760{
756 struct pxa2xx_request *req; 761 struct pxa25x_request *req;
757 762
758 /* called with irqs blocked */ 763 /* called with irqs blocked */
759 while (!list_empty(&ep->queue)) { 764 while (!list_empty(&ep->queue)) {
760 req = list_entry(ep->queue.next, 765 req = list_entry(ep->queue.next,
761 struct pxa2xx_request, 766 struct pxa25x_request,
762 queue); 767 queue);
763 done(ep, req, status); 768 done(ep, req, status);
764 } 769 }
@@ -768,13 +773,13 @@ static void nuke(struct pxa2xx_ep *ep, int status)
768 773
769 774
770/* dequeue JUST ONE request */ 775/* dequeue JUST ONE request */
771static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 776static int pxa25x_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
772{ 777{
773 struct pxa2xx_ep *ep; 778 struct pxa25x_ep *ep;
774 struct pxa2xx_request *req; 779 struct pxa25x_request *req;
775 unsigned long flags; 780 unsigned long flags;
776 781
777 ep = container_of(_ep, struct pxa2xx_ep, ep); 782 ep = container_of(_ep, struct pxa25x_ep, ep);
778 if (!_ep || ep->ep.name == ep0name) 783 if (!_ep || ep->ep.name == ep0name)
779 return -EINVAL; 784 return -EINVAL;
780 785
@@ -798,12 +803,12 @@ static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
798 803
799/*-------------------------------------------------------------------------*/ 804/*-------------------------------------------------------------------------*/
800 805
801static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value) 806static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
802{ 807{
803 struct pxa2xx_ep *ep; 808 struct pxa25x_ep *ep;
804 unsigned long flags; 809 unsigned long flags;
805 810
806 ep = container_of(_ep, struct pxa2xx_ep, ep); 811 ep = container_of(_ep, struct pxa25x_ep, ep);
807 if (unlikely (!_ep 812 if (unlikely (!_ep
808 || (!ep->desc && ep->ep.name != ep0name)) 813 || (!ep->desc && ep->ep.name != ep0name))
809 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 814 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
@@ -853,11 +858,11 @@ static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
853 return 0; 858 return 0;
854} 859}
855 860
856static int pxa2xx_ep_fifo_status(struct usb_ep *_ep) 861static int pxa25x_ep_fifo_status(struct usb_ep *_ep)
857{ 862{
858 struct pxa2xx_ep *ep; 863 struct pxa25x_ep *ep;
859 864
860 ep = container_of(_ep, struct pxa2xx_ep, ep); 865 ep = container_of(_ep, struct pxa25x_ep, ep);
861 if (!_ep) { 866 if (!_ep) {
862 DMSG("%s, bad ep\n", __func__); 867 DMSG("%s, bad ep\n", __func__);
863 return -ENODEV; 868 return -ENODEV;
@@ -872,11 +877,11 @@ static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
872 return (*ep->reg_ubcr & 0xfff) + 1; 877 return (*ep->reg_ubcr & 0xfff) + 1;
873} 878}
874 879
875static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep) 880static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
876{ 881{
877 struct pxa2xx_ep *ep; 882 struct pxa25x_ep *ep;
878 883
879 ep = container_of(_ep, struct pxa2xx_ep, ep); 884 ep = container_of(_ep, struct pxa25x_ep, ep);
880 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) { 885 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
881 DMSG("%s, bad ep\n", __func__); 886 DMSG("%s, bad ep\n", __func__);
882 return; 887 return;
@@ -898,19 +903,19 @@ static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
898} 903}
899 904
900 905
901static struct usb_ep_ops pxa2xx_ep_ops = { 906static struct usb_ep_ops pxa25x_ep_ops = {
902 .enable = pxa2xx_ep_enable, 907 .enable = pxa25x_ep_enable,
903 .disable = pxa2xx_ep_disable, 908 .disable = pxa25x_ep_disable,
904 909
905 .alloc_request = pxa2xx_ep_alloc_request, 910 .alloc_request = pxa25x_ep_alloc_request,
906 .free_request = pxa2xx_ep_free_request, 911 .free_request = pxa25x_ep_free_request,
907 912
908 .queue = pxa2xx_ep_queue, 913 .queue = pxa25x_ep_queue,
909 .dequeue = pxa2xx_ep_dequeue, 914 .dequeue = pxa25x_ep_dequeue,
910 915
911 .set_halt = pxa2xx_ep_set_halt, 916 .set_halt = pxa25x_ep_set_halt,
912 .fifo_status = pxa2xx_ep_fifo_status, 917 .fifo_status = pxa25x_ep_fifo_status,
913 .fifo_flush = pxa2xx_ep_fifo_flush, 918 .fifo_flush = pxa25x_ep_fifo_flush,
914}; 919};
915 920
916 921
@@ -919,12 +924,12 @@ static struct usb_ep_ops pxa2xx_ep_ops = {
919 * --------------------------------------------------------------------------- 924 * ---------------------------------------------------------------------------
920 */ 925 */
921 926
922static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget) 927static int pxa25x_udc_get_frame(struct usb_gadget *_gadget)
923{ 928{
924 return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff); 929 return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
925} 930}
926 931
927static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget) 932static int pxa25x_udc_wakeup(struct usb_gadget *_gadget)
928{ 933{
929 /* host may not have enabled remote wakeup */ 934 /* host may not have enabled remote wakeup */
930 if ((UDCCS0 & UDCCS0_DRWF) == 0) 935 if ((UDCCS0 & UDCCS0_DRWF) == 0)
@@ -933,14 +938,14 @@ static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
933 return 0; 938 return 0;
934} 939}
935 940
936static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *); 941static void stop_activity(struct pxa25x_udc *, struct usb_gadget_driver *);
937static void udc_enable (struct pxa2xx_udc *); 942static void udc_enable (struct pxa25x_udc *);
938static void udc_disable(struct pxa2xx_udc *); 943static void udc_disable(struct pxa25x_udc *);
939 944
940/* We disable the UDC -- and its 48 MHz clock -- whenever it's not 945/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
941 * in active use. 946 * in active use.
942 */ 947 */
943static int pullup(struct pxa2xx_udc *udc) 948static int pullup(struct pxa25x_udc *udc)
944{ 949{
945 int is_active = udc->vbus && udc->pullup && !udc->suspended; 950 int is_active = udc->vbus && udc->pullup && !udc->suspended;
946 DMSG("%s\n", is_active ? "active" : "inactive"); 951 DMSG("%s\n", is_active ? "active" : "inactive");
@@ -970,11 +975,11 @@ static int pullup(struct pxa2xx_udc *udc)
970} 975}
971 976
972/* VBUS reporting logically comes from a transceiver */ 977/* VBUS reporting logically comes from a transceiver */
973static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active) 978static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
974{ 979{
975 struct pxa2xx_udc *udc; 980 struct pxa25x_udc *udc;
976 981
977 udc = container_of(_gadget, struct pxa2xx_udc, gadget); 982 udc = container_of(_gadget, struct pxa25x_udc, gadget);
978 udc->vbus = (is_active != 0); 983 udc->vbus = (is_active != 0);
979 DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); 984 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
980 pullup(udc); 985 pullup(udc);
@@ -982,11 +987,11 @@ static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
982} 987}
983 988
984/* drivers may have software control over D+ pullup */ 989/* drivers may have software control over D+ pullup */
985static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active) 990static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
986{ 991{
987 struct pxa2xx_udc *udc; 992 struct pxa25x_udc *udc;
988 993
989 udc = container_of(_gadget, struct pxa2xx_udc, gadget); 994 udc = container_of(_gadget, struct pxa25x_udc, gadget);
990 995
991 /* not all boards support pullup control */ 996 /* not all boards support pullup control */
992 if (!udc->mach->gpio_pullup && !udc->mach->udc_command) 997 if (!udc->mach->gpio_pullup && !udc->mach->udc_command)
@@ -997,11 +1002,11 @@ static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
997 return 0; 1002 return 0;
998} 1003}
999 1004
1000static const struct usb_gadget_ops pxa2xx_udc_ops = { 1005static const struct usb_gadget_ops pxa25x_udc_ops = {
1001 .get_frame = pxa2xx_udc_get_frame, 1006 .get_frame = pxa25x_udc_get_frame,
1002 .wakeup = pxa2xx_udc_wakeup, 1007 .wakeup = pxa25x_udc_wakeup,
1003 .vbus_session = pxa2xx_udc_vbus_session, 1008 .vbus_session = pxa25x_udc_vbus_session,
1004 .pullup = pxa2xx_udc_pullup, 1009 .pullup = pxa25x_udc_pullup,
1005 1010
1006 // .vbus_draw ... boards may consume current from VBUS, up to 1011 // .vbus_draw ... boards may consume current from VBUS, up to
1007 // 100-500mA based on config. the 500uA suspend ceiling means 1012 // 100-500mA based on config. the 500uA suspend ceiling means
@@ -1015,7 +1020,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = {
1015static int 1020static int
1016udc_seq_show(struct seq_file *m, void *_d) 1021udc_seq_show(struct seq_file *m, void *_d)
1017{ 1022{
1018 struct pxa2xx_udc *dev = m->private; 1023 struct pxa25x_udc *dev = m->private;
1019 unsigned long flags; 1024 unsigned long flags;
1020 int i; 1025 int i;
1021 u32 tmp; 1026 u32 tmp;
@@ -1076,8 +1081,8 @@ udc_seq_show(struct seq_file *m, void *_d)
1076 1081
1077 /* dump endpoint queues */ 1082 /* dump endpoint queues */
1078 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1083 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1079 struct pxa2xx_ep *ep = &dev->ep [i]; 1084 struct pxa25x_ep *ep = &dev->ep [i];
1080 struct pxa2xx_request *req; 1085 struct pxa25x_request *req;
1081 1086
1082 if (i != 0) { 1087 if (i != 0) {
1083 const struct usb_endpoint_descriptor *desc; 1088 const struct usb_endpoint_descriptor *desc;
@@ -1150,7 +1155,7 @@ static const struct file_operations debug_fops = {
1150/* 1155/*
1151 * udc_disable - disable USB device controller 1156 * udc_disable - disable USB device controller
1152 */ 1157 */
1153static void udc_disable(struct pxa2xx_udc *dev) 1158static void udc_disable(struct pxa25x_udc *dev)
1154{ 1159{
1155 /* block all irqs */ 1160 /* block all irqs */
1156 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM); 1161 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
@@ -1170,7 +1175,7 @@ static void udc_disable(struct pxa2xx_udc *dev)
1170/* 1175/*
1171 * udc_reinit - initialize software state 1176 * udc_reinit - initialize software state
1172 */ 1177 */
1173static void udc_reinit(struct pxa2xx_udc *dev) 1178static void udc_reinit(struct pxa25x_udc *dev)
1174{ 1179{
1175 u32 i; 1180 u32 i;
1176 1181
@@ -1181,7 +1186,7 @@ static void udc_reinit(struct pxa2xx_udc *dev)
1181 1186
1182 /* basic endpoint records init */ 1187 /* basic endpoint records init */
1183 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1188 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1184 struct pxa2xx_ep *ep = &dev->ep[i]; 1189 struct pxa25x_ep *ep = &dev->ep[i];
1185 1190
1186 if (i != 0) 1191 if (i != 0)
1187 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list); 1192 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
@@ -1198,7 +1203,7 @@ static void udc_reinit(struct pxa2xx_udc *dev)
1198/* until it's enabled, this UDC should be completely invisible 1203/* until it's enabled, this UDC should be completely invisible
1199 * to any USB host. 1204 * to any USB host.
1200 */ 1205 */
1201static void udc_enable (struct pxa2xx_udc *dev) 1206static void udc_enable (struct pxa25x_udc *dev)
1202{ 1207{
1203 udc_clear_mask_UDCCR(UDCCR_UDE); 1208 udc_clear_mask_UDCCR(UDCCR_UDE);
1204 1209
@@ -1254,7 +1259,7 @@ static void udc_enable (struct pxa2xx_udc *dev)
1254 */ 1259 */
1255int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1260int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1256{ 1261{
1257 struct pxa2xx_udc *dev = the_controller; 1262 struct pxa25x_udc *dev = the_controller;
1258 int retval; 1263 int retval;
1259 1264
1260 if (!driver 1265 if (!driver
@@ -1299,7 +1304,7 @@ fail:
1299EXPORT_SYMBOL(usb_gadget_register_driver); 1304EXPORT_SYMBOL(usb_gadget_register_driver);
1300 1305
1301static void 1306static void
1302stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver) 1307stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
1303{ 1308{
1304 int i; 1309 int i;
1305 1310
@@ -1310,7 +1315,7 @@ stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
1310 1315
1311 /* prevent new request submissions, kill any outstanding requests */ 1316 /* prevent new request submissions, kill any outstanding requests */
1312 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { 1317 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1313 struct pxa2xx_ep *ep = &dev->ep[i]; 1318 struct pxa25x_ep *ep = &dev->ep[i];
1314 1319
1315 ep->stopped = 1; 1320 ep->stopped = 1;
1316 nuke(ep, -ESHUTDOWN); 1321 nuke(ep, -ESHUTDOWN);
@@ -1327,7 +1332,7 @@ stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
1327 1332
1328int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1333int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1329{ 1334{
1330 struct pxa2xx_udc *dev = the_controller; 1335 struct pxa25x_udc *dev = the_controller;
1331 1336
1332 if (!dev) 1337 if (!dev)
1333 return -ENODEV; 1338 return -ENODEV;
@@ -1364,7 +1369,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver);
1364static irqreturn_t 1369static irqreturn_t
1365lubbock_vbus_irq(int irq, void *_dev) 1370lubbock_vbus_irq(int irq, void *_dev)
1366{ 1371{
1367 struct pxa2xx_udc *dev = _dev; 1372 struct pxa25x_udc *dev = _dev;
1368 int vbus; 1373 int vbus;
1369 1374
1370 dev->stats.irqs++; 1375 dev->stats.irqs++;
@@ -1383,7 +1388,7 @@ lubbock_vbus_irq(int irq, void *_dev)
1383 return IRQ_NONE; 1388 return IRQ_NONE;
1384 } 1389 }
1385 1390
1386 pxa2xx_udc_vbus_session(&dev->gadget, vbus); 1391 pxa25x_udc_vbus_session(&dev->gadget, vbus);
1387 return IRQ_HANDLED; 1392 return IRQ_HANDLED;
1388} 1393}
1389 1394
@@ -1391,20 +1396,20 @@ lubbock_vbus_irq(int irq, void *_dev)
1391 1396
1392static irqreturn_t udc_vbus_irq(int irq, void *_dev) 1397static irqreturn_t udc_vbus_irq(int irq, void *_dev)
1393{ 1398{
1394 struct pxa2xx_udc *dev = _dev; 1399 struct pxa25x_udc *dev = _dev;
1395 int vbus = gpio_get_value(dev->mach->gpio_vbus); 1400 int vbus = gpio_get_value(dev->mach->gpio_vbus);
1396 1401
1397 if (dev->mach->gpio_vbus_inverted) 1402 if (dev->mach->gpio_vbus_inverted)
1398 vbus = !vbus; 1403 vbus = !vbus;
1399 1404
1400 pxa2xx_udc_vbus_session(&dev->gadget, vbus); 1405 pxa25x_udc_vbus_session(&dev->gadget, vbus);
1401 return IRQ_HANDLED; 1406 return IRQ_HANDLED;
1402} 1407}
1403 1408
1404 1409
1405/*-------------------------------------------------------------------------*/ 1410/*-------------------------------------------------------------------------*/
1406 1411
1407static inline void clear_ep_state (struct pxa2xx_udc *dev) 1412static inline void clear_ep_state (struct pxa25x_udc *dev)
1408{ 1413{
1409 unsigned i; 1414 unsigned i;
1410 1415
@@ -1417,7 +1422,7 @@ static inline void clear_ep_state (struct pxa2xx_udc *dev)
1417 1422
1418static void udc_watchdog(unsigned long _dev) 1423static void udc_watchdog(unsigned long _dev)
1419{ 1424{
1420 struct pxa2xx_udc *dev = (void *)_dev; 1425 struct pxa25x_udc *dev = (void *)_dev;
1421 1426
1422 local_irq_disable(); 1427 local_irq_disable();
1423 if (dev->ep0state == EP0_STALL 1428 if (dev->ep0state == EP0_STALL
@@ -1430,11 +1435,11 @@ static void udc_watchdog(unsigned long _dev)
1430 local_irq_enable(); 1435 local_irq_enable();
1431} 1436}
1432 1437
1433static void handle_ep0 (struct pxa2xx_udc *dev) 1438static void handle_ep0 (struct pxa25x_udc *dev)
1434{ 1439{
1435 u32 udccs0 = UDCCS0; 1440 u32 udccs0 = UDCCS0;
1436 struct pxa2xx_ep *ep = &dev->ep [0]; 1441 struct pxa25x_ep *ep = &dev->ep [0];
1437 struct pxa2xx_request *req; 1442 struct pxa25x_request *req;
1438 union { 1443 union {
1439 struct usb_ctrlrequest r; 1444 struct usb_ctrlrequest r;
1440 u8 raw [8]; 1445 u8 raw [8];
@@ -1444,7 +1449,7 @@ static void handle_ep0 (struct pxa2xx_udc *dev)
1444 if (list_empty(&ep->queue)) 1449 if (list_empty(&ep->queue))
1445 req = NULL; 1450 req = NULL;
1446 else 1451 else
1447 req = list_entry(ep->queue.next, struct pxa2xx_request, queue); 1452 req = list_entry(ep->queue.next, struct pxa25x_request, queue);
1448 1453
1449 /* clear stall status */ 1454 /* clear stall status */
1450 if (udccs0 & UDCCS0_SST) { 1455 if (udccs0 & UDCCS0_SST) {
@@ -1654,9 +1659,9 @@ stall:
1654 USIR0 = USIR0_IR0; 1659 USIR0 = USIR0_IR0;
1655} 1660}
1656 1661
1657static void handle_ep(struct pxa2xx_ep *ep) 1662static void handle_ep(struct pxa25x_ep *ep)
1658{ 1663{
1659 struct pxa2xx_request *req; 1664 struct pxa25x_request *req;
1660 int is_in = ep->bEndpointAddress & USB_DIR_IN; 1665 int is_in = ep->bEndpointAddress & USB_DIR_IN;
1661 int completed; 1666 int completed;
1662 u32 udccs, tmp; 1667 u32 udccs, tmp;
@@ -1665,7 +1670,7 @@ static void handle_ep(struct pxa2xx_ep *ep)
1665 completed = 0; 1670 completed = 0;
1666 if (likely (!list_empty(&ep->queue))) 1671 if (likely (!list_empty(&ep->queue)))
1667 req = list_entry(ep->queue.next, 1672 req = list_entry(ep->queue.next,
1668 struct pxa2xx_request, queue); 1673 struct pxa25x_request, queue);
1669 else 1674 else
1670 req = NULL; 1675 req = NULL;
1671 1676
@@ -1702,16 +1707,16 @@ static void handle_ep(struct pxa2xx_ep *ep)
1702} 1707}
1703 1708
1704/* 1709/*
1705 * pxa2xx_udc_irq - interrupt handler 1710 * pxa25x_udc_irq - interrupt handler
1706 * 1711 *
1707 * avoid delays in ep0 processing. the control handshaking isn't always 1712 * avoid delays in ep0 processing. the control handshaking isn't always
1708 * under software control (pxa250c0 and the pxa255 are better), and delays 1713 * under software control (pxa250c0 and the pxa255 are better), and delays
1709 * could cause usb protocol errors. 1714 * could cause usb protocol errors.
1710 */ 1715 */
1711static irqreturn_t 1716static irqreturn_t
1712pxa2xx_udc_irq(int irq, void *_dev) 1717pxa25x_udc_irq(int irq, void *_dev)
1713{ 1718{
1714 struct pxa2xx_udc *dev = _dev; 1719 struct pxa25x_udc *dev = _dev;
1715 int handled; 1720 int handled;
1716 1721
1717 dev->stats.irqs++; 1722 dev->stats.irqs++;
@@ -1820,9 +1825,9 @@ static void nop_release (struct device *dev)
1820 * doing it at run-time) to save code, eliminate fault paths, and 1825 * doing it at run-time) to save code, eliminate fault paths, and
1821 * be more obviously correct. 1826 * be more obviously correct.
1822 */ 1827 */
1823static struct pxa2xx_udc memory = { 1828static struct pxa25x_udc memory = {
1824 .gadget = { 1829 .gadget = {
1825 .ops = &pxa2xx_udc_ops, 1830 .ops = &pxa25x_udc_ops,
1826 .ep0 = &memory.ep[0].ep, 1831 .ep0 = &memory.ep[0].ep,
1827 .name = driver_name, 1832 .name = driver_name,
1828 .dev = { 1833 .dev = {
@@ -1835,7 +1840,7 @@ static struct pxa2xx_udc memory = {
1835 .ep[0] = { 1840 .ep[0] = {
1836 .ep = { 1841 .ep = {
1837 .name = ep0name, 1842 .name = ep0name,
1838 .ops = &pxa2xx_ep_ops, 1843 .ops = &pxa25x_ep_ops,
1839 .maxpacket = EP0_FIFO_SIZE, 1844 .maxpacket = EP0_FIFO_SIZE,
1840 }, 1845 },
1841 .dev = &memory, 1846 .dev = &memory,
@@ -1847,7 +1852,7 @@ static struct pxa2xx_udc memory = {
1847 .ep[1] = { 1852 .ep[1] = {
1848 .ep = { 1853 .ep = {
1849 .name = "ep1in-bulk", 1854 .name = "ep1in-bulk",
1850 .ops = &pxa2xx_ep_ops, 1855 .ops = &pxa25x_ep_ops,
1851 .maxpacket = BULK_FIFO_SIZE, 1856 .maxpacket = BULK_FIFO_SIZE,
1852 }, 1857 },
1853 .dev = &memory, 1858 .dev = &memory,
@@ -1860,7 +1865,7 @@ static struct pxa2xx_udc memory = {
1860 .ep[2] = { 1865 .ep[2] = {
1861 .ep = { 1866 .ep = {
1862 .name = "ep2out-bulk", 1867 .name = "ep2out-bulk",
1863 .ops = &pxa2xx_ep_ops, 1868 .ops = &pxa25x_ep_ops,
1864 .maxpacket = BULK_FIFO_SIZE, 1869 .maxpacket = BULK_FIFO_SIZE,
1865 }, 1870 },
1866 .dev = &memory, 1871 .dev = &memory,
@@ -1871,11 +1876,11 @@ static struct pxa2xx_udc memory = {
1871 .reg_ubcr = &UBCR2, 1876 .reg_ubcr = &UBCR2,
1872 .reg_uddr = &UDDR2, 1877 .reg_uddr = &UDDR2,
1873 }, 1878 },
1874#ifndef CONFIG_USB_PXA2XX_SMALL 1879#ifndef CONFIG_USB_PXA25X_SMALL
1875 .ep[3] = { 1880 .ep[3] = {
1876 .ep = { 1881 .ep = {
1877 .name = "ep3in-iso", 1882 .name = "ep3in-iso",
1878 .ops = &pxa2xx_ep_ops, 1883 .ops = &pxa25x_ep_ops,
1879 .maxpacket = ISO_FIFO_SIZE, 1884 .maxpacket = ISO_FIFO_SIZE,
1880 }, 1885 },
1881 .dev = &memory, 1886 .dev = &memory,
@@ -1888,7 +1893,7 @@ static struct pxa2xx_udc memory = {
1888 .ep[4] = { 1893 .ep[4] = {
1889 .ep = { 1894 .ep = {
1890 .name = "ep4out-iso", 1895 .name = "ep4out-iso",
1891 .ops = &pxa2xx_ep_ops, 1896 .ops = &pxa25x_ep_ops,
1892 .maxpacket = ISO_FIFO_SIZE, 1897 .maxpacket = ISO_FIFO_SIZE,
1893 }, 1898 },
1894 .dev = &memory, 1899 .dev = &memory,
@@ -1902,7 +1907,7 @@ static struct pxa2xx_udc memory = {
1902 .ep[5] = { 1907 .ep[5] = {
1903 .ep = { 1908 .ep = {
1904 .name = "ep5in-int", 1909 .name = "ep5in-int",
1905 .ops = &pxa2xx_ep_ops, 1910 .ops = &pxa25x_ep_ops,
1906 .maxpacket = INT_FIFO_SIZE, 1911 .maxpacket = INT_FIFO_SIZE,
1907 }, 1912 },
1908 .dev = &memory, 1913 .dev = &memory,
@@ -1917,7 +1922,7 @@ static struct pxa2xx_udc memory = {
1917 .ep[6] = { 1922 .ep[6] = {
1918 .ep = { 1923 .ep = {
1919 .name = "ep6in-bulk", 1924 .name = "ep6in-bulk",
1920 .ops = &pxa2xx_ep_ops, 1925 .ops = &pxa25x_ep_ops,
1921 .maxpacket = BULK_FIFO_SIZE, 1926 .maxpacket = BULK_FIFO_SIZE,
1922 }, 1927 },
1923 .dev = &memory, 1928 .dev = &memory,
@@ -1930,7 +1935,7 @@ static struct pxa2xx_udc memory = {
1930 .ep[7] = { 1935 .ep[7] = {
1931 .ep = { 1936 .ep = {
1932 .name = "ep7out-bulk", 1937 .name = "ep7out-bulk",
1933 .ops = &pxa2xx_ep_ops, 1938 .ops = &pxa25x_ep_ops,
1934 .maxpacket = BULK_FIFO_SIZE, 1939 .maxpacket = BULK_FIFO_SIZE,
1935 }, 1940 },
1936 .dev = &memory, 1941 .dev = &memory,
@@ -1944,7 +1949,7 @@ static struct pxa2xx_udc memory = {
1944 .ep[8] = { 1949 .ep[8] = {
1945 .ep = { 1950 .ep = {
1946 .name = "ep8in-iso", 1951 .name = "ep8in-iso",
1947 .ops = &pxa2xx_ep_ops, 1952 .ops = &pxa25x_ep_ops,
1948 .maxpacket = ISO_FIFO_SIZE, 1953 .maxpacket = ISO_FIFO_SIZE,
1949 }, 1954 },
1950 .dev = &memory, 1955 .dev = &memory,
@@ -1957,7 +1962,7 @@ static struct pxa2xx_udc memory = {
1957 .ep[9] = { 1962 .ep[9] = {
1958 .ep = { 1963 .ep = {
1959 .name = "ep9out-iso", 1964 .name = "ep9out-iso",
1960 .ops = &pxa2xx_ep_ops, 1965 .ops = &pxa25x_ep_ops,
1961 .maxpacket = ISO_FIFO_SIZE, 1966 .maxpacket = ISO_FIFO_SIZE,
1962 }, 1967 },
1963 .dev = &memory, 1968 .dev = &memory,
@@ -1971,7 +1976,7 @@ static struct pxa2xx_udc memory = {
1971 .ep[10] = { 1976 .ep[10] = {
1972 .ep = { 1977 .ep = {
1973 .name = "ep10in-int", 1978 .name = "ep10in-int",
1974 .ops = &pxa2xx_ep_ops, 1979 .ops = &pxa25x_ep_ops,
1975 .maxpacket = INT_FIFO_SIZE, 1980 .maxpacket = INT_FIFO_SIZE,
1976 }, 1981 },
1977 .dev = &memory, 1982 .dev = &memory,
@@ -1986,7 +1991,7 @@ static struct pxa2xx_udc memory = {
1986 .ep[11] = { 1991 .ep[11] = {
1987 .ep = { 1992 .ep = {
1988 .name = "ep11in-bulk", 1993 .name = "ep11in-bulk",
1989 .ops = &pxa2xx_ep_ops, 1994 .ops = &pxa25x_ep_ops,
1990 .maxpacket = BULK_FIFO_SIZE, 1995 .maxpacket = BULK_FIFO_SIZE,
1991 }, 1996 },
1992 .dev = &memory, 1997 .dev = &memory,
@@ -1999,7 +2004,7 @@ static struct pxa2xx_udc memory = {
1999 .ep[12] = { 2004 .ep[12] = {
2000 .ep = { 2005 .ep = {
2001 .name = "ep12out-bulk", 2006 .name = "ep12out-bulk",
2002 .ops = &pxa2xx_ep_ops, 2007 .ops = &pxa25x_ep_ops,
2003 .maxpacket = BULK_FIFO_SIZE, 2008 .maxpacket = BULK_FIFO_SIZE,
2004 }, 2009 },
2005 .dev = &memory, 2010 .dev = &memory,
@@ -2013,7 +2018,7 @@ static struct pxa2xx_udc memory = {
2013 .ep[13] = { 2018 .ep[13] = {
2014 .ep = { 2019 .ep = {
2015 .name = "ep13in-iso", 2020 .name = "ep13in-iso",
2016 .ops = &pxa2xx_ep_ops, 2021 .ops = &pxa25x_ep_ops,
2017 .maxpacket = ISO_FIFO_SIZE, 2022 .maxpacket = ISO_FIFO_SIZE,
2018 }, 2023 },
2019 .dev = &memory, 2024 .dev = &memory,
@@ -2026,7 +2031,7 @@ static struct pxa2xx_udc memory = {
2026 .ep[14] = { 2031 .ep[14] = {
2027 .ep = { 2032 .ep = {
2028 .name = "ep14out-iso", 2033 .name = "ep14out-iso",
2029 .ops = &pxa2xx_ep_ops, 2034 .ops = &pxa25x_ep_ops,
2030 .maxpacket = ISO_FIFO_SIZE, 2035 .maxpacket = ISO_FIFO_SIZE,
2031 }, 2036 },
2032 .dev = &memory, 2037 .dev = &memory,
@@ -2040,7 +2045,7 @@ static struct pxa2xx_udc memory = {
2040 .ep[15] = { 2045 .ep[15] = {
2041 .ep = { 2046 .ep = {
2042 .name = "ep15in-int", 2047 .name = "ep15in-int",
2043 .ops = &pxa2xx_ep_ops, 2048 .ops = &pxa25x_ep_ops,
2044 .maxpacket = INT_FIFO_SIZE, 2049 .maxpacket = INT_FIFO_SIZE,
2045 }, 2050 },
2046 .dev = &memory, 2051 .dev = &memory,
@@ -2050,7 +2055,7 @@ static struct pxa2xx_udc memory = {
2050 .reg_udccs = &UDCCS15, 2055 .reg_udccs = &UDCCS15,
2051 .reg_uddr = &UDDR15, 2056 .reg_uddr = &UDDR15,
2052 }, 2057 },
2053#endif /* !CONFIG_USB_PXA2XX_SMALL */ 2058#endif /* !CONFIG_USB_PXA25X_SMALL */
2054}; 2059};
2055 2060
2056#define CP15R0_VENDOR_MASK 0xffffe000 2061#define CP15R0_VENDOR_MASK 0xffffe000
@@ -2090,9 +2095,9 @@ static struct pxa2xx_udc memory = {
2090/* 2095/*
2091 * probe - binds to the platform device 2096 * probe - binds to the platform device
2092 */ 2097 */
2093static int __init pxa2xx_udc_probe(struct platform_device *pdev) 2098static int __init pxa25x_udc_probe(struct platform_device *pdev)
2094{ 2099{
2095 struct pxa2xx_udc *dev = &memory; 2100 struct pxa25x_udc *dev = &memory;
2096 int retval, vbus_irq, irq; 2101 int retval, vbus_irq, irq;
2097 u32 chiprev; 2102 u32 chiprev;
2098 2103
@@ -2155,7 +2160,7 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2155 2160
2156 if (dev->mach->gpio_vbus) { 2161 if (dev->mach->gpio_vbus) {
2157 if ((retval = gpio_request(dev->mach->gpio_vbus, 2162 if ((retval = gpio_request(dev->mach->gpio_vbus,
2158 "pxa2xx_udc GPIO VBUS"))) { 2163 "pxa25x_udc GPIO VBUS"))) {
2159 dev_dbg(&pdev->dev, 2164 dev_dbg(&pdev->dev,
2160 "can't get vbus gpio %d, err: %d\n", 2165 "can't get vbus gpio %d, err: %d\n",
2161 dev->mach->gpio_vbus, retval); 2166 dev->mach->gpio_vbus, retval);
@@ -2168,7 +2173,7 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2168 2173
2169 if (dev->mach->gpio_pullup) { 2174 if (dev->mach->gpio_pullup) {
2170 if ((retval = gpio_request(dev->mach->gpio_pullup, 2175 if ((retval = gpio_request(dev->mach->gpio_pullup,
2171 "pca2xx_udc GPIO PULLUP"))) { 2176 "pca25x_udc GPIO PULLUP"))) {
2172 dev_dbg(&pdev->dev, 2177 dev_dbg(&pdev->dev,
2173 "can't get pullup gpio %d, err: %d\n", 2178 "can't get pullup gpio %d, err: %d\n",
2174 dev->mach->gpio_pullup, retval); 2179 dev->mach->gpio_pullup, retval);
@@ -2194,7 +2199,7 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2194 dev->vbus = is_vbus_present(); 2199 dev->vbus = is_vbus_present();
2195 2200
2196 /* irq setup after old hardware state is cleaned up */ 2201 /* irq setup after old hardware state is cleaned up */
2197 retval = request_irq(irq, pxa2xx_udc_irq, 2202 retval = request_irq(irq, pxa25x_udc_irq,
2198 IRQF_DISABLED, driver_name, dev); 2203 IRQF_DISABLED, driver_name, dev);
2199 if (retval != 0) { 2204 if (retval != 0) {
2200 pr_err("%s: can't get irq %d, err %d\n", 2205 pr_err("%s: can't get irq %d, err %d\n",
@@ -2260,14 +2265,14 @@ lubbock_fail0:
2260 return retval; 2265 return retval;
2261} 2266}
2262 2267
2263static void pxa2xx_udc_shutdown(struct platform_device *_dev) 2268static void pxa25x_udc_shutdown(struct platform_device *_dev)
2264{ 2269{
2265 pullup_off(); 2270 pullup_off();
2266} 2271}
2267 2272
2268static int __exit pxa2xx_udc_remove(struct platform_device *pdev) 2273static int __exit pxa25x_udc_remove(struct platform_device *pdev)
2269{ 2274{
2270 struct pxa2xx_udc *dev = platform_get_drvdata(pdev); 2275 struct pxa25x_udc *dev = platform_get_drvdata(pdev);
2271 2276
2272 if (dev->driver) 2277 if (dev->driver)
2273 return -EBUSY; 2278 return -EBUSY;
@@ -2317,9 +2322,9 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2317 * VBUS IRQs should probably be ignored so that the PXA device just acts 2322 * VBUS IRQs should probably be ignored so that the PXA device just acts
2318 * "dead" to USB hosts until system resume. 2323 * "dead" to USB hosts until system resume.
2319 */ 2324 */
2320static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) 2325static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
2321{ 2326{
2322 struct pxa2xx_udc *udc = platform_get_drvdata(dev); 2327 struct pxa25x_udc *udc = platform_get_drvdata(dev);
2323 unsigned long flags; 2328 unsigned long flags;
2324 2329
2325 if (!udc->mach->gpio_pullup && !udc->mach->udc_command) 2330 if (!udc->mach->gpio_pullup && !udc->mach->udc_command)
@@ -2333,9 +2338,9 @@ static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
2333 return 0; 2338 return 0;
2334} 2339}
2335 2340
2336static int pxa2xx_udc_resume(struct platform_device *dev) 2341static int pxa25x_udc_resume(struct platform_device *dev)
2337{ 2342{
2338 struct pxa2xx_udc *udc = platform_get_drvdata(dev); 2343 struct pxa25x_udc *udc = platform_get_drvdata(dev);
2339 unsigned long flags; 2344 unsigned long flags;
2340 2345
2341 udc->suspended = 0; 2346 udc->suspended = 0;
@@ -2347,27 +2352,27 @@ static int pxa2xx_udc_resume(struct platform_device *dev)
2347} 2352}
2348 2353
2349#else 2354#else
2350#define pxa2xx_udc_suspend NULL 2355#define pxa25x_udc_suspend NULL
2351#define pxa2xx_udc_resume NULL 2356#define pxa25x_udc_resume NULL
2352#endif 2357#endif
2353 2358
2354/*-------------------------------------------------------------------------*/ 2359/*-------------------------------------------------------------------------*/
2355 2360
2356static struct platform_driver udc_driver = { 2361static struct platform_driver udc_driver = {
2357 .shutdown = pxa2xx_udc_shutdown, 2362 .shutdown = pxa25x_udc_shutdown,
2358 .remove = __exit_p(pxa2xx_udc_remove), 2363 .remove = __exit_p(pxa25x_udc_remove),
2359 .suspend = pxa2xx_udc_suspend, 2364 .suspend = pxa25x_udc_suspend,
2360 .resume = pxa2xx_udc_resume, 2365 .resume = pxa25x_udc_resume,
2361 .driver = { 2366 .driver = {
2362 .owner = THIS_MODULE, 2367 .owner = THIS_MODULE,
2363 .name = "pxa2xx-udc", 2368 .name = "pxa25x-udc",
2364 }, 2369 },
2365}; 2370};
2366 2371
2367static int __init udc_init(void) 2372static int __init udc_init(void)
2368{ 2373{
2369 pr_info("%s: version %s\n", driver_name, DRIVER_VERSION); 2374 pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
2370 return platform_driver_probe(&udc_driver, pxa2xx_udc_probe); 2375 return platform_driver_probe(&udc_driver, pxa25x_udc_probe);
2371} 2376}
2372module_init(udc_init); 2377module_init(udc_init);
2373 2378
@@ -2380,4 +2385,4 @@ module_exit(udc_exit);
2380MODULE_DESCRIPTION(DRIVER_DESC); 2385MODULE_DESCRIPTION(DRIVER_DESC);
2381MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell"); 2386MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
2382MODULE_LICENSE("GPL"); 2387MODULE_LICENSE("GPL");
2383MODULE_ALIAS("platform:pxa2xx-udc"); 2388MODULE_ALIAS("platform:pxa25x-udc");
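
A minimal sketch, not taken from this patch: since the platform driver is now named "pxa25x-udc", board support code must register a platform device with the matching name before platform_driver_probe() can bind it. In the kernel this normally lives in the mach-pxa generic device setup; the registration path, register window and IRQ number below are placeholders.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource pxa25x_udc_resources[] = {
	{ .start = 0x40600000, .end = 0x4060ffff, .flags = IORESOURCE_MEM },	/* placeholder */
	{ .start = 11,         .end = 11,         .flags = IORESOURCE_IRQ },	/* placeholder */
};

static struct platform_device pxa25x_udc_device = {
	.name		= "pxa25x-udc",		/* must match udc_driver.driver.name above */
	.id		= -1,
	.resource	= pxa25x_udc_resources,
	.num_resources	= ARRAY_SIZE(pxa25x_udc_resources),
};

static int __init example_board_add_udc(void)
{
	return platform_device_register(&pxa25x_udc_device);
}
arch_initcall(example_board_add_udc);
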
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa25x_udc.h
index e2c19e88c875..4d11ece7c95f 100644
--- a/drivers/usb/gadget/pxa2xx_udc.h
+++ b/drivers/usb/gadget/pxa25x_udc.h
@@ -1,6 +1,5 @@
1/* 1/*
2 * linux/drivers/usb/gadget/pxa2xx_udc.h 2 * Intel PXA25x on-chip full speed USB device controller
3 * Intel PXA2xx on-chip full speed USB device controller
4 * 3 *
5 * Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix 4 * Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix
6 * Copyright (C) 2003 David Brownell 5 * Copyright (C) 2003 David Brownell
@@ -21,14 +20,14 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 21 */
23 22
24#ifndef __LINUX_USB_GADGET_PXA2XX_H 23#ifndef __LINUX_USB_GADGET_PXA25X_H
25#define __LINUX_USB_GADGET_PXA2XX_H 24#define __LINUX_USB_GADGET_PXA25X_H
26 25
27#include <linux/types.h> 26#include <linux/types.h>
28 27
29/*-------------------------------------------------------------------------*/ 28/*-------------------------------------------------------------------------*/
30 29
31/* pxa2xx has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */ 30/* pxa25x has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
32#define UFNRH_SIR (1 << 7) /* SOF interrupt request */ 31#define UFNRH_SIR (1 << 7) /* SOF interrupt request */
33#define UFNRH_SIM (1 << 6) /* SOF interrupt mask */ 32#define UFNRH_SIM (1 << 6) /* SOF interrupt mask */
34#define UFNRH_IPE14 (1 << 5) /* ISO packet error, ep14 */ 33#define UFNRH_IPE14 (1 << 5) /* ISO packet error, ep14 */
@@ -45,11 +44,11 @@
45 44
46/*-------------------------------------------------------------------------*/ 45/*-------------------------------------------------------------------------*/
47 46
48struct pxa2xx_udc; 47struct pxa25x_udc;
49 48
50struct pxa2xx_ep { 49struct pxa25x_ep {
51 struct usb_ep ep; 50 struct usb_ep ep;
52 struct pxa2xx_udc *dev; 51 struct pxa25x_udc *dev;
53 52
54 const struct usb_endpoint_descriptor *desc; 53 const struct usb_endpoint_descriptor *desc;
55 struct list_head queue; 54 struct list_head queue;
@@ -72,7 +71,7 @@ struct pxa2xx_ep {
72 volatile u32 *reg_uddr; 71 volatile u32 *reg_uddr;
73}; 72};
74 73
75struct pxa2xx_request { 74struct pxa25x_request {
76 struct usb_request req; 75 struct usb_request req;
77 struct list_head queue; 76 struct list_head queue;
78}; 77};
@@ -98,7 +97,7 @@ struct udc_stats {
98 unsigned long irqs; 97 unsigned long irqs;
99}; 98};
100 99
101#ifdef CONFIG_USB_PXA2XX_SMALL 100#ifdef CONFIG_USB_PXA25X_SMALL
102/* when memory's tight, SMALL config saves code+data. */ 101/* when memory's tight, SMALL config saves code+data. */
103#define PXA_UDC_NUM_ENDPOINTS 3 102#define PXA_UDC_NUM_ENDPOINTS 3
104#endif 103#endif
@@ -107,7 +106,7 @@ struct udc_stats {
107#define PXA_UDC_NUM_ENDPOINTS 16 106#define PXA_UDC_NUM_ENDPOINTS 16
108#endif 107#endif
109 108
110struct pxa2xx_udc { 109struct pxa25x_udc {
111 struct usb_gadget gadget; 110 struct usb_gadget gadget;
112 struct usb_gadget_driver *driver; 111 struct usb_gadget_driver *driver;
113 112
@@ -130,7 +129,7 @@ struct pxa2xx_udc {
130 struct clk *clk; 129 struct clk *clk;
131 struct pxa2xx_udc_mach_info *mach; 130 struct pxa2xx_udc_mach_info *mach;
132 u64 dma_mask; 131 u64 dma_mask;
133 struct pxa2xx_ep ep [PXA_UDC_NUM_ENDPOINTS]; 132 struct pxa25x_ep ep [PXA_UDC_NUM_ENDPOINTS];
134 133
135#ifdef CONFIG_USB_GADGET_DEBUG_FS 134#ifdef CONFIG_USB_GADGET_DEBUG_FS
136 struct dentry *debugfs_udc; 135 struct dentry *debugfs_udc;
@@ -144,7 +143,7 @@ struct pxa2xx_udc {
144/* lubbock can also report usb connect/disconnect irqs */ 143/* lubbock can also report usb connect/disconnect irqs */
145#endif 144#endif
146 145
147static struct pxa2xx_udc *the_controller; 146static struct pxa25x_udc *the_controller;
148 147
149/*-------------------------------------------------------------------------*/ 148/*-------------------------------------------------------------------------*/
150 149
@@ -209,7 +208,7 @@ dump_udccs0(const char *label)
209} 208}
210 209
211static void __maybe_unused 210static void __maybe_unused
212dump_state(struct pxa2xx_udc *dev) 211dump_state(struct pxa25x_udc *dev)
213{ 212{
214 u32 tmp; 213 u32 tmp;
215 unsigned i; 214 unsigned i;
@@ -264,4 +263,4 @@ dump_state(struct pxa2xx_udc *dev)
264#define INFO(stuff...) pr_info("udc: " stuff) 263#define INFO(stuff...) pr_info("udc: " stuff)
265 264
266 265
267#endif /* __LINUX_USB_GADGET_PXA2XX_H */ 266#endif /* __LINUX_USB_GADGET_PXA25X_H */
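
The renamed controller still exposes the old-style registration entry points (usb_gadget_register_driver() and usb_gadget_unregister_driver() are exported above). The following is a hedged sketch of how a gadget function driver binds through them; every example_* and my_* name is hypothetical, and the registration path expects a full-speed-capable driver that supplies bind() and setup().

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* Hypothetical callbacks -- a real function driver does useful work here. */
static int my_bind(struct usb_gadget *gadget)
{
	return 0;	/* claim endpoints, allocate the ep0 request, etc. */
}

static void my_unbind(struct usb_gadget *gadget)
{
}

static int my_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* real drivers answer control requests here */
}

static void my_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver my_gadget_driver = {
	.speed		= USB_SPEED_FULL,	/* the PXA25x UDC is full speed only */
	.function	= "example function",
	.bind		= my_bind,
	.unbind		= my_unbind,
	.setup		= my_setup,
	.disconnect	= my_disconnect,
	.driver		= {
		.name	= "example_gadget",
		.owner	= THIS_MODULE,
	},
};

static int __init my_gadget_init(void)
{
	return usb_gadget_register_driver(&my_gadget_driver);
}
module_init(my_gadget_init);

static void __exit my_gadget_exit(void)
{
	usb_gadget_unregister_driver(&my_gadget_driver);
}
module_exit(my_gadget_exit);
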
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index e02bfd4df3a6..9c0e82ec5c43 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -38,7 +38,7 @@
38#include <linux/usb.h> 38#include <linux/usb.h>
39#include <linux/usb/ch9.h> 39#include <linux/usb/ch9.h>
40#include <linux/usb/gadget.h> 40#include <linux/usb/gadget.h>
41 41#include <asm/arch/pxa2xx-regs.h> /* FIXME: for PSSR */
42#include <asm/arch/udc.h> 42#include <asm/arch/udc.h>
43 43
44#include "pxa27x_udc.h" 44#include "pxa27x_udc.h"
@@ -2360,18 +2360,19 @@ static int pxa_udc_resume(struct platform_device *_dev)
2360 * Software must configure the USB OTG pad, UDC, and UHC 2360 * Software must configure the USB OTG pad, UDC, and UHC
2361 * to the state they were in before entering sleep mode. 2361 * to the state they were in before entering sleep mode.
2362 */ 2362 */
2363 PSSR |= PSSR_OTGPH; 2363 if (cpu_is_pxa27x())
2364 PSSR |= PSSR_OTGPH;
2364 2365
2365 return 0; 2366 return 0;
2366} 2367}
2367#endif 2368#endif
2368 2369
2369/* work with hotplug and coldplug */ 2370/* work with hotplug and coldplug */
2370MODULE_ALIAS("platform:pxa2xx-udc"); 2371MODULE_ALIAS("platform:pxa27x-udc");
2371 2372
2372static struct platform_driver udc_driver = { 2373static struct platform_driver udc_driver = {
2373 .driver = { 2374 .driver = {
2374 .name = "pxa2xx-udc", 2375 .name = "pxa27x-udc",
2375 .owner = THIS_MODULE, 2376 .owner = THIS_MODULE,
2376 }, 2377 },
2377 .remove = __exit_p(pxa_udc_remove), 2378 .remove = __exit_p(pxa_udc_remove),
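
The resume hunk above now clears the OTG pin hold only when actually running on a PXA27x, so the same kernel image can also run on parts without that bit. A small sketch of the idiom, assuming the PSSR and PSSR_OTGPH definitions from <asm/arch/pxa2xx-regs.h> and the cpu_is_pxa27x() helper from the PXA hardware headers; the function name is hypothetical.

#include <asm/hardware.h>
#include <asm/arch/pxa2xx-regs.h>

/* Sketch: the OTG pin hold bit is PXA27x-specific, so check the CPU first. */
static void example_clear_otg_pin_hold(void)
{
	if (cpu_is_pxa27x() && (PSSR & PSSR_OTGPH))
		PSSR |= PSSR_OTGPH;	/* writing the bit back releases the hold */
}
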
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index 97453db924ff..1d1b7936ee11 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -484,12 +484,4 @@ static inline struct pxa_udc *to_gadget_udc(struct usb_gadget *gadget)
484#define ep_warn(ep, fmt, arg...) \ 484#define ep_warn(ep, fmt, arg...) \
485 dev_warn(ep->dev->dev, "%s:%s:" fmt, EPNAME(ep), __func__, ## arg) 485 dev_warn(ep->dev->dev, "%s:%s:" fmt, EPNAME(ep), __func__, ## arg)
486 486
487/*
488 * Cannot include pxa-regs.h, as register names are similar.
489 * So PSSR is redefined here. This should be removed once UDC registers will
490 * be gone from pxa-regs.h.
491 */
492#define PSSR __REG(0x40F00004) /* Power Manager Sleep Status */
493#define PSSR_OTGPH (1 << 6) /* OTG Peripheral Hold */
494
495#endif /* __LINUX_USB_GADGET_PXA27X_H */ 487#endif /* __LINUX_USB_GADGET_PXA27X_H */
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 3a7c24c03671..a19a4f80a6e1 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -169,13 +169,16 @@ static void start_hnp(struct ohci_hcd *ohci)
169{ 169{
170 const unsigned port = ohci_to_hcd(ohci)->self.otg_port - 1; 170 const unsigned port = ohci_to_hcd(ohci)->self.otg_port - 1;
171 unsigned long flags; 171 unsigned long flags;
172 u32 l;
172 173
173 otg_start_hnp(ohci->transceiver); 174 otg_start_hnp(ohci->transceiver);
174 175
175 local_irq_save(flags); 176 local_irq_save(flags);
176 ohci->transceiver->state = OTG_STATE_A_SUSPEND; 177 ohci->transceiver->state = OTG_STATE_A_SUSPEND;
177 writel (RH_PS_PSS, &ohci->regs->roothub.portstatus [port]); 178 writel (RH_PS_PSS, &ohci->regs->roothub.portstatus [port]);
178 OTG_CTRL_REG &= ~OTG_A_BUSREQ; 179 l = omap_readl(OTG_CTRL);
180 l &= ~OTG_A_BUSREQ;
181 omap_writel(l, OTG_CTRL);
179 local_irq_restore(flags); 182 local_irq_restore(flags);
180} 183}
181 184
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index d4ee27d92be8..127b15799024 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -27,6 +27,7 @@
27#include <asm/mach-types.h> 27#include <asm/mach-types.h>
28#include <asm/hardware.h> 28#include <asm/hardware.h>
29#include <asm/arch/pxa-regs.h> 29#include <asm/arch/pxa-regs.h>
30#include <asm/arch/pxa2xx-regs.h> /* FIXME: for PSSR */
30#include <asm/arch/ohci.h> 31#include <asm/arch/ohci.h>
31 32
32#define PXA_UHC_MAX_PORTNUM 3 33#define PXA_UHC_MAX_PORTNUM 3
@@ -104,7 +105,7 @@ static int pxa27x_start_hc(struct device *dev)
104 UHCHIE = (UHCHIE_UPRIE | UHCHIE_RWIE); 105 UHCHIE = (UHCHIE_UPRIE | UHCHIE_RWIE);
105 106
106 /* Clear any OTG Pin Hold */ 107 /* Clear any OTG Pin Hold */
107 if (PSSR & PSSR_OTGPH) 108 if (cpu_is_pxa27x() && (PSSR & PSSR_OTGPH))
108 PSSR |= PSSR_OTGPH; 109 PSSR |= PSSR_OTGPH;
109 110
110 return 0; 111 return 0;
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 49145534e06e..293a46247c3b 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -15,6 +15,7 @@
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/compat.h> 16#include <linux/compat.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/smp_lock.h>
18 19
19#include <asm/uaccess.h> 20#include <asm/uaccess.h>
20 21
@@ -527,14 +528,17 @@ static int mon_bin_open(struct inode *inode, struct file *file)
527 size_t size; 528 size_t size;
528 int rc; 529 int rc;
529 530
531 lock_kernel();
530 mutex_lock(&mon_lock); 532 mutex_lock(&mon_lock);
531 if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) { 533 if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
532 mutex_unlock(&mon_lock); 534 mutex_unlock(&mon_lock);
535 unlock_kernel();
533 return -ENODEV; 536 return -ENODEV;
534 } 537 }
535 if (mbus != &mon_bus0 && mbus->u_bus == NULL) { 538 if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
536 printk(KERN_ERR TAG ": consistency error on open\n"); 539 printk(KERN_ERR TAG ": consistency error on open\n");
537 mutex_unlock(&mon_lock); 540 mutex_unlock(&mon_lock);
541 unlock_kernel();
538 return -ENODEV; 542 return -ENODEV;
539 } 543 }
540 544
@@ -568,6 +572,7 @@ static int mon_bin_open(struct inode *inode, struct file *file)
568 572
569 file->private_data = rp; 573 file->private_data = rp;
570 mutex_unlock(&mon_lock); 574 mutex_unlock(&mon_lock);
575 unlock_kernel();
571 return 0; 576 return 0;
572 577
573err_allocbuff: 578err_allocbuff:
@@ -576,6 +581,7 @@ err_allocvec:
576 kfree(rp); 581 kfree(rp);
577err_alloc: 582err_alloc:
578 mutex_unlock(&mon_lock); 583 mutex_unlock(&mon_lock);
584 unlock_kernel();
579 return rc; 585 return rc;
580} 586}
581 587
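
mon_bin_open() is bracketed with lock_kernel()/unlock_kernel() as part of pushing the big kernel lock out of the VFS open path and into the individual open() methods. A hedged sketch of the resulting shape; example_lookup() is a hypothetical stand-in for the driver-specific work done under the lock.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>

/* Hypothetical stand-in for the per-device lookup done under the lock. */
static void *example_lookup(unsigned int minor)
{
	return NULL;
}

static int example_open(struct inode *inode, struct file *file)
{
	void *state;
	int rc = 0;

	lock_kernel();
	state = example_lookup(iminor(inode));
	if (state == NULL) {
		rc = -ENODEV;
		goto out;		/* drop the BKL on every exit path */
	}
	file->private_data = state;
out:
	unlock_kernel();
	return rc;
}
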
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e0c5f96b273d..9b887ef64ff1 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -7,7 +7,7 @@ menu "Graphics support"
7 7
8source "drivers/char/agp/Kconfig" 8source "drivers/char/agp/Kconfig"
9 9
10source "drivers/char/drm/Kconfig" 10source "drivers/gpu/drm/Kconfig"
11 11
12config VGASTATE 12config VGASTATE
13 tristate 13 tristate
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index dcd8073c2369..30bf7f2f1635 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -112,3 +112,10 @@ config BACKLIGHT_CARILLO_RANCH
112 help 112 help
113 If you have a Intel LE80578 (Carillo Ranch) say Y to enable the 113 If you have a Intel LE80578 (Carillo Ranch) say Y to enable the
114 backlight driver. 114 backlight driver.
115
116config BACKLIGHT_PWM
117 tristate "Generic PWM based Backlight Driver"
118 depends on BACKLIGHT_CLASS_DEVICE && HAVE_PWM
119 help
120 If you have a LCD backlight adjustable by PWM, say Y to enable
121 this driver.
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 33f6c7cecc73..b51a7cd12500 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
10obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o 10obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
11obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o 11obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
12obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o 12obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
13obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
new file mode 100644
index 000000000000..6338d0e2fe07
--- /dev/null
+++ b/drivers/video/backlight/pwm_bl.c
@@ -0,0 +1,185 @@
+/*
+ * linux/drivers/video/backlight/pwm_bl.c
+ *
+ * simple PWM based backlight control, board code has to setup
+ * 1) pin configuration so PWM waveforms can output
+ * 2) platform_data casts to the PWM id (0/1/2/3 on PXA)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/pwm.h>
+#include <linux/pwm_backlight.h>
+
+struct pwm_bl_data {
+	struct pwm_device	*pwm;
+	unsigned int		period;
+	int			(*notify)(int brightness);
+};
+
+static int pwm_backlight_update_status(struct backlight_device *bl)
+{
+	struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+	int brightness = bl->props.brightness;
+	int max = bl->props.max_brightness;
+
+	if (bl->props.power != FB_BLANK_UNBLANK)
+		brightness = 0;
+
+	if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+		brightness = 0;
+
+	if (pb->notify)
+		brightness = pb->notify(brightness);
+
+	if (brightness == 0) {
+		pwm_config(pb->pwm, 0, pb->period);
+		pwm_disable(pb->pwm);
+	} else {
+		pwm_config(pb->pwm, brightness * pb->period / max, pb->period);
+		pwm_enable(pb->pwm);
+	}
+	return 0;
+}
+
+static int pwm_backlight_get_brightness(struct backlight_device *bl)
+{
+	return bl->props.brightness;
+}
+
+static struct backlight_ops pwm_backlight_ops = {
+	.update_status	= pwm_backlight_update_status,
+	.get_brightness	= pwm_backlight_get_brightness,
+};
+
+static int pwm_backlight_probe(struct platform_device *pdev)
+{
+	struct platform_pwm_backlight_data *data = pdev->dev.platform_data;
+	struct backlight_device *bl;
+	struct pwm_bl_data *pb;
+	int ret;
+
+	if (!data)
+		return -EINVAL;
+
+	if (data->init) {
+		ret = data->init(&pdev->dev);
+		if (ret < 0)
+			return ret;
+	}
+
+	pb = kzalloc(sizeof(*pb), GFP_KERNEL);
+	if (!pb) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	pb->period = data->pwm_period_ns;
+	pb->notify = data->notify;
+
+	pb->pwm = pwm_request(data->pwm_id, "backlight");
+	if (IS_ERR(pb->pwm)) {
+		dev_err(&pdev->dev, "unable to request PWM for backlight\n");
+		ret = PTR_ERR(pb->pwm);
+		goto err_pwm;
+	}
+
+	bl = backlight_device_register(pdev->name, &pdev->dev,
+			pb, &pwm_backlight_ops);
+	if (IS_ERR(bl)) {
+		dev_err(&pdev->dev, "failed to register backlight\n");
+		ret = PTR_ERR(bl);
+		goto err_bl;
+	}
+
+	bl->props.max_brightness = data->max_brightness;
+	bl->props.brightness = data->dft_brightness;
+	backlight_update_status(bl);
+
+	platform_set_drvdata(pdev, bl);
+	return 0;
+
+err_bl:
+	pwm_free(pb->pwm);
+err_pwm:
+	kfree(pb);
+err_alloc:
+	if (data->exit)
+		data->exit(&pdev->dev);
+	return ret;
+}
+
+static int pwm_backlight_remove(struct platform_device *pdev)
+{
+	struct platform_pwm_backlight_data *data = pdev->dev.platform_data;
+	struct backlight_device *bl = platform_get_drvdata(pdev);
+	struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+
+	backlight_device_unregister(bl);
+	pwm_config(pb->pwm, 0, pb->period);
+	pwm_disable(pb->pwm);
+	pwm_free(pb->pwm);
+	kfree(pb);
+	if (data->exit)
+		data->exit(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pwm_backlight_suspend(struct platform_device *pdev,
+				 pm_message_t state)
+{
+	struct backlight_device *bl = platform_get_drvdata(pdev);
+	struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+
+	pwm_config(pb->pwm, 0, pb->period);
+	pwm_disable(pb->pwm);
+	return 0;
+}
+
+static int pwm_backlight_resume(struct platform_device *pdev)
+{
+	struct backlight_device *bl = platform_get_drvdata(pdev);
+
+	backlight_update_status(bl);
+	return 0;
+}
+#else
+#define pwm_backlight_suspend	NULL
+#define pwm_backlight_resume	NULL
+#endif
+
+static struct platform_driver pwm_backlight_driver = {
+	.driver		= {
+		.name	= "pwm-backlight",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= pwm_backlight_probe,
+	.remove		= pwm_backlight_remove,
+	.suspend	= pwm_backlight_suspend,
+	.resume		= pwm_backlight_resume,
+};
+
+static int __init pwm_backlight_init(void)
+{
+	return platform_driver_register(&pwm_backlight_driver);
+}
+module_init(pwm_backlight_init);
+
+static void __exit pwm_backlight_exit(void)
+{
+	platform_driver_unregister(&pwm_backlight_driver);
+}
+module_exit(pwm_backlight_exit);
+
+MODULE_DESCRIPTION("PWM based Backlight Driver");
+MODULE_LICENSE("GPL");
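
The probe above takes everything it needs from platform_data (struct platform_pwm_backlight_data), so board support code is expected to register a "pwm-backlight" platform device that carries the PWM id, period and brightness range. A minimal board-code sketch follows; the device and driver names match the code above, but the numeric values and the unused init/notify/exit hooks are illustrative assumptions, not part of this patch.

/* Hypothetical board file fragment -- values are examples only. */
#include <linux/platform_device.h>
#include <linux/pwm_backlight.h>

static struct platform_pwm_backlight_data example_backlight_data = {
	.pwm_id		= 0,		/* first PWM unit on the SoC */
	.max_brightness	= 100,
	.dft_brightness	= 50,
	.pwm_period_ns	= 78770,	/* PWM period in nanoseconds */
	/* .init, .notify and .exit are optional callbacks, left unset */
};

static struct platform_device example_backlight_device = {
	.name	= "pwm-backlight",	/* must match pwm_backlight_driver */
	.dev	= {
		.platform_data = &example_backlight_data,
	},
};

static int __init example_board_backlight_init(void)
{
	/* makes pwm_backlight_probe() run against the data above */
	return platform_device_register(&example_backlight_device);
}
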
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 776f7fcd2fbf..33ebdb198daf 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1326,20 +1326,27 @@ fb_open(struct inode *inode, struct file *file)
 
 	if (fbidx >= FB_MAX)
 		return -ENODEV;
+	lock_kernel();
 #ifdef CONFIG_KMOD
 	if (!(info = registered_fb[fbidx]))
 		try_to_load(fbidx);
 #endif /* CONFIG_KMOD */
-	if (!(info = registered_fb[fbidx]))
-		return -ENODEV;
-	if (!try_module_get(info->fbops->owner))
-		return -ENODEV;
+	if (!(info = registered_fb[fbidx])) {
+		res = -ENODEV;
+		goto out;
+	}
+	if (!try_module_get(info->fbops->owner)) {
+		res = -ENODEV;
+		goto out;
+	}
 	file->private_data = info;
 	if (info->fbops->fb_open) {
 		res = info->fbops->fb_open(info,1);
 		if (res)
 			module_put(info->fbops->owner);
 	}
+out:
+	unlock_kernel();
 	return res;
 }
 
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index fafe7db20d6d..d0746261c957 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1792,11 +1792,49 @@ failed:
 	return ret;
 }
 
+static int __devexit pxafb_remove(struct platform_device *dev)
+{
+	struct pxafb_info *fbi = platform_get_drvdata(dev);
+	struct resource *r;
+	int irq;
+	struct fb_info *info;
+
+	if (!fbi)
+		return 0;
+
+	info = &fbi->fb;
+
+	unregister_framebuffer(info);
+
+	pxafb_disable_controller(fbi);
+
+	if (fbi->fb.cmap.len)
+		fb_dealloc_cmap(&fbi->fb.cmap);
+
+	irq = platform_get_irq(dev, 0);
+	free_irq(irq, fbi);
+
+	dma_free_writecombine(&dev->dev, fbi->map_size,
+				fbi->map_cpu, fbi->map_dma);
+
+	iounmap(fbi->mmio_base);
+
+	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	release_mem_region(r->start, r->end - r->start + 1);
+
+	clk_put(fbi->clk);
+	kfree(fbi);
+
+	return 0;
+}
+
 static struct platform_driver pxafb_driver = {
 	.probe		= pxafb_probe,
+	.remove		= pxafb_remove,
 	.suspend	= pxafb_suspend,
 	.resume		= pxafb_resume,
 	.driver		= {
+		.owner	= THIS_MODULE,
 		.name	= "pxa2xx-fb",
 	},
 };
@@ -1809,7 +1847,13 @@ static int __init pxafb_init(void)
 	return platform_driver_register(&pxafb_driver);
 }
 
+static void __exit pxafb_exit(void)
+{
+	platform_driver_unregister(&pxafb_driver);
+}
+
 module_init(pxafb_init);
+module_exit(pxafb_exit);
 
 MODULE_DESCRIPTION("loadable framebuffer driver for PXA");
 MODULE_LICENSE("GPL");
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index 4fb16240c04d..f5252c2552fd 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -21,8 +21,7 @@
 
 #include <asm/io.h>
 #include <asm/mtrr.h>
-
-#include <setup_arch.h>
+#include <asm/visws/sgivw.h>
 
 #define INCLUDE_TIMING_TABLE_DATA
 #define DBE_REG_BASE par->regs
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 619a6f8d65a2..47ed39b52f9c 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -18,6 +18,7 @@
  * frame buffer.
  */
 
+#include <linux/console.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
@@ -42,37 +43,68 @@ struct xenfb_info {
 	struct xenfb_page	*page;
 	unsigned long		*mfns;
 	int			update_wanted; /* XENFB_TYPE_UPDATE wanted */
+	int			feature_resize; /* XENFB_TYPE_RESIZE ok */
+	struct xenfb_resize	resize;		/* protected by resize_lock */
+	int			resize_dpy;	/* ditto */
+	spinlock_t		resize_lock;
 
 	struct xenbus_device	*xbdev;
 };
 
-static u32 xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
+#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
 
+enum { KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT };
+static int video[KPARAM_CNT] = { 2, XENFB_WIDTH, XENFB_HEIGHT };
+module_param_array(video, int, NULL, 0);
+MODULE_PARM_DESC(video,
+	"Video memory size in MB, width, height in pixels (default 2,800,600)");
+
+static void xenfb_make_preferred_console(void);
 static int xenfb_remove(struct xenbus_device *);
-static void xenfb_init_shared_page(struct xenfb_info *);
+static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
 static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
 static void xenfb_disconnect_backend(struct xenfb_info *);
 
+static void xenfb_send_event(struct xenfb_info *info,
+			     union xenfb_out_event *event)
+{
+	u32 prod;
+
+	prod = info->page->out_prod;
+	/* caller ensures !xenfb_queue_full() */
+	mb();			/* ensure ring space available */
+	XENFB_OUT_RING_REF(info->page, prod) = *event;
+	wmb();			/* ensure ring contents visible */
+	info->page->out_prod = prod + 1;
+
+	notify_remote_via_irq(info->irq);
+}
+
 static void xenfb_do_update(struct xenfb_info *info,
 			    int x, int y, int w, int h)
 {
 	union xenfb_out_event event;
-	u32 prod;
 
+	memset(&event, 0, sizeof(event));
 	event.type = XENFB_TYPE_UPDATE;
 	event.update.x = x;
 	event.update.y = y;
 	event.update.width = w;
 	event.update.height = h;
 
-	prod = info->page->out_prod;
 	/* caller ensures !xenfb_queue_full() */
-	mb();			/* ensure ring space available */
-	XENFB_OUT_RING_REF(info->page, prod) = event;
-	wmb();			/* ensure ring contents visible */
-	info->page->out_prod = prod + 1;
+	xenfb_send_event(info, &event);
+}
 
-	notify_remote_via_irq(info->irq);
+static void xenfb_do_resize(struct xenfb_info *info)
+{
+	union xenfb_out_event event;
+
+	memset(&event, 0, sizeof(event));
+	event.resize = info->resize;
+
+	/* caller ensures !xenfb_queue_full() */
+	xenfb_send_event(info, &event);
 }
 
 static int xenfb_queue_full(struct xenfb_info *info)
@@ -84,12 +116,28 @@ static int xenfb_queue_full(struct xenfb_info *info)
 	return prod - cons == XENFB_OUT_RING_LEN;
 }
 
+static void xenfb_handle_resize_dpy(struct xenfb_info *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->resize_lock, flags);
+	if (info->resize_dpy) {
+		if (!xenfb_queue_full(info)) {
+			info->resize_dpy = 0;
+			xenfb_do_resize(info);
+		}
+	}
+	spin_unlock_irqrestore(&info->resize_lock, flags);
+}
+
 static void xenfb_refresh(struct xenfb_info *info,
 			  int x1, int y1, int w, int h)
 {
 	unsigned long flags;
-	int y2 = y1 + h - 1;
 	int x2 = x1 + w - 1;
+	int y2 = y1 + h - 1;
+
+	xenfb_handle_resize_dpy(info);
 
 	if (!info->update_wanted)
 		return;
@@ -222,6 +270,57 @@ static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
 	return res;
 }
 
+static int
+xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct xenfb_info *xenfb_info;
+	int required_mem_len;
+
+	xenfb_info = info->par;
+
+	if (!xenfb_info->feature_resize) {
+		if (var->xres == video[KPARAM_WIDTH] &&
+		    var->yres == video[KPARAM_HEIGHT] &&
+		    var->bits_per_pixel == xenfb_info->page->depth) {
+			return 0;
+		}
+		return -EINVAL;
+	}
+
+	/* Can't resize past initial width and height */
+	if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
+		return -EINVAL;
+
+	required_mem_len = var->xres * var->yres * xenfb_info->page->depth / 8;
+	if (var->bits_per_pixel == xenfb_info->page->depth &&
+	    var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
+	    required_mem_len <= info->fix.smem_len) {
+		var->xres_virtual = var->xres;
+		var->yres_virtual = var->yres;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int xenfb_set_par(struct fb_info *info)
+{
+	struct xenfb_info *xenfb_info;
+	unsigned long flags;
+
+	xenfb_info = info->par;
+
+	spin_lock_irqsave(&xenfb_info->resize_lock, flags);
+	xenfb_info->resize.type = XENFB_TYPE_RESIZE;
+	xenfb_info->resize.width = info->var.xres;
+	xenfb_info->resize.height = info->var.yres;
+	xenfb_info->resize.stride = info->fix.line_length;
+	xenfb_info->resize.depth = info->var.bits_per_pixel;
+	xenfb_info->resize.offset = 0;
+	xenfb_info->resize_dpy = 1;
+	spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
+	return 0;
+}
+
 static struct fb_ops xenfb_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_read	= fb_sys_read,
@@ -230,6 +329,8 @@ static struct fb_ops xenfb_fb_ops = {
 	.fb_fillrect	= xenfb_fillrect,
 	.fb_copyarea	= xenfb_copyarea,
 	.fb_imageblit	= xenfb_imageblit,
+	.fb_check_var	= xenfb_check_var,
+	.fb_set_par	= xenfb_set_par,
 };
 
 static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
@@ -258,6 +359,8 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 {
 	struct xenfb_info *info;
 	struct fb_info *fb_info;
+	int fb_size;
+	int val;
 	int ret;
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -265,18 +368,35 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
 		return -ENOMEM;
 	}
+
+	/* Limit kernel param videoram amount to what is in xenstore */
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
+		if (val < video[KPARAM_MEM])
+			video[KPARAM_MEM] = val;
+	}
+
+	/* If requested res does not fit in available memory, use default */
+	fb_size = video[KPARAM_MEM] * 1024 * 1024;
+	if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH / 8
+	    > fb_size) {
+		video[KPARAM_WIDTH] = XENFB_WIDTH;
+		video[KPARAM_HEIGHT] = XENFB_HEIGHT;
+		fb_size = XENFB_DEFAULT_FB_LEN;
+	}
+
 	dev->dev.driver_data = info;
 	info->xbdev = dev;
 	info->irq = -1;
 	info->x1 = info->y1 = INT_MAX;
 	spin_lock_init(&info->dirty_lock);
+	spin_lock_init(&info->resize_lock);
 
-	info->fb = vmalloc(xenfb_mem_len);
+	info->fb = vmalloc(fb_size);
 	if (info->fb == NULL)
 		goto error_nomem;
-	memset(info->fb, 0, xenfb_mem_len);
+	memset(info->fb, 0, fb_size);
 
-	info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
 	if (!info->mfns)
@@ -287,8 +407,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 	if (!info->page)
 		goto error_nomem;
 
-	xenfb_init_shared_page(info);
-
 	/* abusing framebuffer_alloc() to allocate pseudo_palette */
 	fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
 	if (fb_info == NULL)
@@ -301,9 +419,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 	fb_info->screen_base = info->fb;
 
 	fb_info->fbops = &xenfb_fb_ops;
-	fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
-	fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
-	fb_info->var.bits_per_pixel = info->page->depth;
+	fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
+	fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
+	fb_info->var.bits_per_pixel = XENFB_DEPTH;
 
 	fb_info->var.red = (struct fb_bitfield){16, 8, 0};
 	fb_info->var.green = (struct fb_bitfield){8, 8, 0};
@@ -315,9 +433,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 	fb_info->var.vmode = FB_VMODE_NONINTERLACED;
 
 	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
-	fb_info->fix.line_length = info->page->line_length;
+	fb_info->fix.line_length = fb_info->var.xres * XENFB_DEPTH / 8;
 	fb_info->fix.smem_start = 0;
-	fb_info->fix.smem_len = xenfb_mem_len;
+	fb_info->fix.smem_len = fb_size;
 	strcpy(fb_info->fix.id, "xen");
 	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
 	fb_info->fix.accel = FB_ACCEL_NONE;
@@ -334,6 +452,8 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 	fb_info->fbdefio = &xenfb_defio;
 	fb_deferred_io_init(fb_info);
 
+	xenfb_init_shared_page(info, fb_info);
+
 	ret = register_framebuffer(fb_info);
 	if (ret) {
 		fb_deferred_io_cleanup(fb_info);
@@ -348,6 +468,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 	if (ret < 0)
 		goto error;
 
+	xenfb_make_preferred_console();
 	return 0;
 
  error_nomem:
@@ -358,12 +479,34 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 	return ret;
 }
 
+static __devinit void
+xenfb_make_preferred_console(void)
+{
+	struct console *c;
+
+	if (console_set_on_cmdline)
+		return;
+
+	acquire_console_sem();
+	for (c = console_drivers; c; c = c->next) {
+		if (!strcmp(c->name, "tty") && c->index == 0)
+			break;
+	}
+	release_console_sem();
+	if (c) {
+		unregister_console(c);
+		c->flags |= CON_CONSDEV;
+		c->flags &= ~CON_PRINTBUFFER; /* don't print again */
+		register_console(c);
+	}
+}
+
 static int xenfb_resume(struct xenbus_device *dev)
 {
 	struct xenfb_info *info = dev->dev.driver_data;
 
 	xenfb_disconnect_backend(info);
-	xenfb_init_shared_page(info);
+	xenfb_init_shared_page(info, info->fb_info);
 	return xenfb_connect_backend(dev, info);
 }
 
@@ -391,20 +534,23 @@ static unsigned long vmalloc_to_mfn(void *address)
 	return pfn_to_mfn(vmalloc_to_pfn(address));
 }
 
-static void xenfb_init_shared_page(struct xenfb_info *info)
+static void xenfb_init_shared_page(struct xenfb_info *info,
+				   struct fb_info *fb_info)
 {
 	int i;
+	int epd = PAGE_SIZE / sizeof(info->mfns[0]);
 
 	for (i = 0; i < info->nr_pages; i++)
 		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
 
-	info->page->pd[0] = vmalloc_to_mfn(info->mfns);
-	info->page->pd[1] = 0;
-	info->page->width = XENFB_WIDTH;
-	info->page->height = XENFB_HEIGHT;
-	info->page->depth = XENFB_DEPTH;
-	info->page->line_length = (info->page->depth / 8) * info->page->width;
-	info->page->mem_length = xenfb_mem_len;
+	for (i = 0; i * epd < info->nr_pages; i++)
+		info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
+
+	info->page->width = fb_info->var.xres;
+	info->page->height = fb_info->var.yres;
+	info->page->depth = fb_info->var.bits_per_pixel;
+	info->page->line_length = fb_info->fix.line_length;
+	info->page->mem_length = fb_info->fix.smem_len;
 	info->page->in_cons = info->page->in_prod = 0;
 	info->page->out_cons = info->page->out_prod = 0;
 }
@@ -504,6 +650,11 @@ InitWait:
 			val = 0;
 		if (val)
 			info->update_wanted = 1;
+
+		if (xenbus_scanf(XBT_NIL, dev->otherend,
+				 "feature-resize", "%d", &val) < 0)
+			val = 0;
+		info->feature_resize = val;
 		break;
 
 	case XenbusStateClosing:
@@ -547,4 +698,6 @@ static void __exit xenfb_cleanup(void)
 module_init(xenfb_init);
 module_exit(xenfb_cleanup);
 
+MODULE_DESCRIPTION("Xen virtual framebuffer device frontend");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vfb");
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 37af04f1ffd9..363286c54290 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,4 @@
-obj-y	+= grant-table.o features.o events.o
+obj-y	+= grant-table.o features.o events.o manage.o
 obj-y	+= xenbus/
 obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
 obj-$(CONFIG_XEN_BALLOON)	+= balloon.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ab25ba6cbbb9..591bc29b55f5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -225,7 +225,7 @@ static int increase_reservation(unsigned long nr_pages)
 		page = balloon_next_page(page);
 	}
 
-	reservation.extent_start = (unsigned long)frame_list;
+	set_xen_guest_handle(reservation.extent_start, frame_list);
 	reservation.nr_extents = nr_pages;
 	rc = HYPERVISOR_memory_op(
 		XENMEM_populate_physmap, &reservation);
@@ -321,7 +321,7 @@ static int decrease_reservation(unsigned long nr_pages)
 		balloon_append(pfn_to_page(pfn));
 	}
 
-	reservation.extent_start = (unsigned long)frame_list;
+	set_xen_guest_handle(reservation.extent_start, frame_list);
 	reservation.nr_extents = nr_pages;
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
 	BUG_ON(ret != nr_pages);
@@ -368,7 +368,7 @@ static void balloon_process(struct work_struct *work)
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
-void balloon_set_new_target(unsigned long target)
+static void balloon_set_new_target(unsigned long target)
 {
 	/* No need for lock. Not read-modify-write updates. */
 	balloon_stats.hard_limit = ~0UL;
@@ -483,7 +483,7 @@ static int dealloc_pte_fn(
 		.extent_order = 0,
 		.domid        = DOMID_SELF
 	};
-	reservation.extent_start = (unsigned long)&mfn;
+	set_xen_guest_handle(reservation.extent_start, &mfn);
 	set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
 	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
@@ -519,7 +519,7 @@ static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
 			.extent_order = 0,
 			.domid        = DOMID_SELF
 		};
-		reservation.extent_start = (unsigned long)&gmfn;
+		set_xen_guest_handle(reservation.extent_start, &gmfn);
 		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
 					   &reservation);
 		if (ret == 1)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 76e5b7386af9..332dd63750a0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -355,7 +355,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
+	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 			BUG();
@@ -375,7 +375,7 @@ static void unbind_from_irq(unsigned int irq)
 		evtchn_to_irq[evtchn] = -1;
 		irq_info[irq] = IRQ_UNBOUND;
 
-		dynamic_irq_init(irq);
+		dynamic_irq_cleanup(irq);
 	}
 
 	spin_unlock(&irq_mapping_update_lock);
@@ -557,6 +557,33 @@ out:
 	put_cpu();
 }
 
+/* Rebind a new event channel to an existing irq. */
+void rebind_evtchn_irq(int evtchn, int irq)
+{
+	/* Make sure the irq is masked, since the new event channel
+	   will also be masked. */
+	disable_irq(irq);
+
+	spin_lock(&irq_mapping_update_lock);
+
+	/* After resume the irq<->evtchn mappings are all cleared out */
+	BUG_ON(evtchn_to_irq[evtchn] != -1);
+	/* Expect irq to have been bound before,
+	   so the bindcount should be non-0 */
+	BUG_ON(irq_bindcount[irq] == 0);
+
+	evtchn_to_irq[evtchn] = irq;
+	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	/* new event channels are always bound to cpu 0 */
+	irq_set_affinity(irq, cpumask_of_cpu(0));
+
+	/* Unmask the event channel. */
+	enable_irq(irq);
+}
+
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
@@ -647,6 +674,89 @@ static int retrigger_dynirq(unsigned int irq)
 	return ret;
 }
 
+static void restore_cpu_virqs(unsigned int cpu)
+{
+	struct evtchn_bind_virq bind_virq;
+	int virq, irq, evtchn;
+
+	for (virq = 0; virq < NR_VIRQS; virq++) {
+		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+			continue;
+
+		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
+		BUG_ON(irq_info[irq].index != virq);
+
+		/* Get a new binding from Xen. */
+		bind_virq.virq = virq;
+		bind_virq.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+						&bind_virq) != 0)
+			BUG();
+		evtchn = bind_virq.port;
+
+		/* Record the new mapping. */
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		bind_evtchn_to_cpu(evtchn, cpu);
+
+		/* Ready for use. */
+		unmask_evtchn(evtchn);
+	}
+}
+
+static void restore_cpu_ipis(unsigned int cpu)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	int ipi, irq, evtchn;
+
+	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
+		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+			continue;
+
+		BUG_ON(irq_info[irq].type != IRQT_IPI);
+		BUG_ON(irq_info[irq].index != ipi);
+
+		/* Get a new binding from Xen. */
+		bind_ipi.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+						&bind_ipi) != 0)
+			BUG();
+		evtchn = bind_ipi.port;
+
+		/* Record the new mapping. */
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+		bind_evtchn_to_cpu(evtchn, cpu);
+
+		/* Ready for use. */
+		unmask_evtchn(evtchn);
+
+	}
+}
+
+void xen_irq_resume(void)
+{
+	unsigned int cpu, irq, evtchn;
+
+	init_evtchn_cpu_bindings();
+
+	/* New event-channel space is not 'live' yet. */
+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+		mask_evtchn(evtchn);
+
+	/* No IRQ <-> event-channel mappings. */
+	for (irq = 0; irq < NR_IRQS; irq++)
+		irq_info[irq].evtchn = 0; /* zap event-channel binding */
+
+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+		evtchn_to_irq[evtchn] = -1;
+
+	for_each_possible_cpu(cpu) {
+		restore_cpu_virqs(cpu);
+		restore_cpu_ipis(cpu);
+	}
+}
+
 static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.name		= "xen-dyn",
 	.mask		= disable_dynirq,
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 52b6b41b909d..e9e11168616a 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -471,14 +471,14 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 	return 0;
 }
 
-static int gnttab_resume(void)
+int gnttab_resume(void)
 {
 	if (max_nr_grant_frames() < nr_grant_frames)
 		return -ENOSYS;
 	return gnttab_map(0, nr_grant_frames - 1);
 }
 
-static int gnttab_suspend(void)
+int gnttab_suspend(void)
 {
 	arch_gnttab_unmap_shared(shared, nr_grant_frames);
 	return 0;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
new file mode 100644
index 000000000000..5b546e365f00
--- /dev/null
+++ b/drivers/xen/manage.c
@@ -0,0 +1,252 @@
+/*
+ * Handle extern requests for shutdown, reboot and sysrq
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/sysrq.h>
+#include <linux/stop_machine.h>
+#include <linux/freezer.h>
+
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/hvc-console.h>
+#include <xen/xen-ops.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+enum shutdown_state {
+	SHUTDOWN_INVALID = -1,
+	SHUTDOWN_POWEROFF = 0,
+	SHUTDOWN_SUSPEND = 2,
+	/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
+	   report a crash, not be instructed to crash!
+	   HALT is the same as POWEROFF, as far as we're concerned.  The tools use
+	   the distinction when we return the reason code to them.  */
+	SHUTDOWN_HALT = 4,
+};
+
+/* Ignore multiple shutdown requests. */
+static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
+
+#ifdef CONFIG_PM_SLEEP
+static int xen_suspend(void *data)
+{
+	int *cancelled = data;
+	int err;
+
+	BUG_ON(!irqs_disabled());
+
+	load_cr3(swapper_pg_dir);
+
+	err = device_power_down(PMSG_SUSPEND);
+	if (err) {
+		printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
+		       err);
+		return err;
+	}
+
+	xen_mm_pin_all();
+	gnttab_suspend();
+	xen_pre_suspend();
+
+	/*
+	 * This hypercall returns 1 if suspend was cancelled
+	 * or the domain was merely checkpointed, and 0 if it
+	 * is resuming in a new domain.
+	 */
+	*cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+
+	xen_post_suspend(*cancelled);
+	gnttab_resume();
+	xen_mm_unpin_all();
+
+	device_power_up();
+
+	if (!*cancelled) {
+		xen_irq_resume();
+		xen_console_resume();
+	}
+
+	return 0;
+}
+
+static void do_suspend(void)
+{
+	int err;
+	int cancelled = 1;
+
+	shutting_down = SHUTDOWN_SUSPEND;
+
+#ifdef CONFIG_PREEMPT
+	/* If the kernel is preemptible, we need to freeze all the processes
+	   to prevent them from being in the middle of a pagetable update
+	   during suspend. */
+	err = freeze_processes();
+	if (err) {
+		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
+		return;
+	}
+#endif
+
+	err = device_suspend(PMSG_SUSPEND);
+	if (err) {
+		printk(KERN_ERR "xen suspend: device_suspend %d\n", err);
+		goto out;
+	}
+
+	printk("suspending xenbus...\n");
+	/* XXX use normal device tree? */
+	xenbus_suspend();
+
+	err = stop_machine_run(xen_suspend, &cancelled, 0);
+	if (err) {
+		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
+		goto out;
+	}
+
+	if (!cancelled)
+		xenbus_resume();
+	else
+		xenbus_suspend_cancel();
+
+	device_resume();
+
+	/* Make sure timer events get retriggered on all CPUs */
+	clock_was_set();
+out:
+#ifdef CONFIG_PREEMPT
+	thaw_processes();
+#endif
+	shutting_down = SHUTDOWN_INVALID;
+}
+#endif	/* CONFIG_PM_SLEEP */
+
+static void shutdown_handler(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len)
+{
+	char *str;
+	struct xenbus_transaction xbt;
+	int err;
+
+	if (shutting_down != SHUTDOWN_INVALID)
+		return;
+
+ again:
+	err = xenbus_transaction_start(&xbt);
+	if (err)
+		return;
+
+	str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
+	/* Ignore read errors and empty reads. */
+	if (XENBUS_IS_ERR_READ(str)) {
+		xenbus_transaction_end(xbt, 1);
+		return;
+	}
+
+	xenbus_write(xbt, "control", "shutdown", "");
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err == -EAGAIN) {
+		kfree(str);
+		goto again;
+	}
+
+	if (strcmp(str, "poweroff") == 0 ||
+	    strcmp(str, "halt") == 0) {
+		shutting_down = SHUTDOWN_POWEROFF;
+		orderly_poweroff(false);
+	} else if (strcmp(str, "reboot") == 0) {
+		shutting_down = SHUTDOWN_POWEROFF; /* ? */
+		ctrl_alt_del();
+#ifdef CONFIG_PM_SLEEP
+	} else if (strcmp(str, "suspend") == 0) {
+		do_suspend();
+#endif
+	} else {
+		printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
+		shutting_down = SHUTDOWN_INVALID;
+	}
+
+	kfree(str);
+}
+
+static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
+			  unsigned int len)
+{
+	char sysrq_key = '\0';
+	struct xenbus_transaction xbt;
+	int err;
+
+ again:
+	err = xenbus_transaction_start(&xbt);
+	if (err)
+		return;
+	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
+		printk(KERN_ERR "Unable to read sysrq code in "
+		       "control/sysrq\n");
+		xenbus_transaction_end(xbt, 1);
+		return;
+	}
+
+	if (sysrq_key != '\0')
+		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err == -EAGAIN)
+		goto again;
+
+	if (sysrq_key != '\0')
+		handle_sysrq(sysrq_key, NULL);
+}
+
+static struct xenbus_watch shutdown_watch = {
+	.node = "control/shutdown",
+	.callback = shutdown_handler
+};
+
+static struct xenbus_watch sysrq_watch = {
+	.node = "control/sysrq",
+	.callback = sysrq_handler
+};
+
+static int setup_shutdown_watcher(void)
+{
+	int err;
+
+	err = register_xenbus_watch(&shutdown_watch);
+	if (err) {
+		printk(KERN_ERR "Failed to set shutdown watcher\n");
+		return err;
+	}
+
+	err = register_xenbus_watch(&sysrq_watch);
+	if (err) {
+		printk(KERN_ERR "Failed to set sysrq watcher\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int shutdown_event(struct notifier_block *notifier,
+			  unsigned long event,
+			  void *data)
+{
+	setup_shutdown_watcher();
+	return NOTIFY_DONE;
+}
+
+static int __init setup_shutdown_event(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = shutdown_event
+	};
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+
+subsys_initcall(setup_shutdown_event);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 0f86b0ff7879..9678b3e98c63 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -117,7 +117,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev,
 	char *path;
 
 	va_start(ap, pathfmt);
-	path = kvasprintf(GFP_KERNEL, pathfmt, ap);
+	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
 	va_end(ap);
 
 	if (!path) {
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 6efbe3f29ca5..090c61ee8fd0 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -203,7 +203,6 @@ int xb_read(void *data, unsigned len)
 int xb_init_comms(void)
 {
 	struct xenstore_domain_interface *intf = xen_store_interface;
-	int err;
 
 	if (intf->req_prod != intf->req_cons)
 		printk(KERN_ERR "XENBUS request ring is not quiescent "
@@ -216,18 +215,20 @@ int xb_init_comms(void)
 		intf->rsp_cons = intf->rsp_prod;
 	}
 
-	if (xenbus_irq)
-		unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+	if (xenbus_irq) {
+		/* Already have an irq; assume we're resuming */
+		rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
+	} else {
+		int err;
+		err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
+						0, "xenbus", &xb_waitq);
+		if (err <= 0) {
+			printk(KERN_ERR "XENBUS request irq failed %i\n", err);
+			return err;
+		}
 
-	err = bind_evtchn_to_irqhandler(
-		xen_store_evtchn, wake_waiting,
-		0, "xenbus", &xb_waitq);
-	if (err <= 0) {
-		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
-		return err;
+		xenbus_irq = err;
 	}
 
-	xenbus_irq = err;
-
 	return 0;
 }
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 227d53b12a5c..7f2f91c0e11d 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -283,9 +283,9 @@ static char *join(const char *dir, const char *name)
 	char *buffer;
 
 	if (strlen(name) == 0)
-		buffer = kasprintf(GFP_KERNEL, "%s", dir);
+		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
 	else
-		buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
+		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
 	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
 }
 
@@ -297,7 +297,7 @@ static char **split(char *strings, unsigned int len, unsigned int *num)
 	*num = count_strings(strings, len);
 
 	/* Transfer to one big alloc for easy freeing. */
-	ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
+	ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
 	if (!ret) {
 		kfree(strings);
 		return ERR_PTR(-ENOMEM);
@@ -751,7 +751,7 @@ static int process_msg(void)
 	}
 
 
-	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+	msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
 	if (msg == NULL) {
 		err = -ENOMEM;
 		goto out;
@@ -763,7 +763,7 @@ static int process_msg(void)
 		goto out;
 	}
 
-	body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
+	body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
 	if (body == NULL) {
 		kfree(msg);
 		err = -ENOMEM;