Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/Kconfig | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 3
-rw-r--r--  drivers/base/power/trace.c | 2
-rw-r--r--  drivers/base/topology.c | 25
-rw-r--r--  drivers/block/DAC960.c | 157
-rw-r--r--  drivers/block/aoe/aoecmd.c | 2
-rw-r--r--  drivers/block/paride/pt.c | 19
-rw-r--r--  drivers/block/pktcdvd.c | 46
-rw-r--r--  drivers/block/xen-blkfront.c | 48
-rw-r--r--  drivers/cdrom/cdrom.c | 274
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/agp/amd64-agp.c | 85
-rw-r--r--  drivers/char/drm/Makefile | 40
-rw-r--r--  drivers/char/drm/drm.h | 694
-rw-r--r--  drivers/char/drm/drmP.h | 1153
-rw-r--r--  drivers/char/drm/drm_core.h | 34
-rw-r--r--  drivers/char/drm/drm_hashtab.h | 67
-rw-r--r--  drivers/char/drm/drm_memory.h | 61
-rw-r--r--  drivers/char/drm/drm_memory_debug.h | 309
-rw-r--r--  drivers/char/drm/drm_os_linux.h | 108
-rw-r--r--  drivers/char/drm/drm_pciids.h | 415
-rw-r--r--  drivers/char/drm/drm_sarea.h | 84
-rw-r--r--  drivers/char/drm/drm_sman.h | 176
-rw-r--r--  drivers/char/drm/i810_drm.h | 281
-rw-r--r--  drivers/char/drm/i830_drm.h | 342
-rw-r--r--  drivers/char/drm/i915_drm.h | 270
-rw-r--r--  drivers/char/drm/mga_drm.h | 417
-rw-r--r--  drivers/char/drm/r128_drm.h | 326
-rw-r--r--  drivers/char/drm/radeon_drm.h | 749
-rw-r--r--  drivers/char/drm/savage_drm.h | 210
-rw-r--r--  drivers/char/drm/sis_drm.h | 67
-rw-r--r--  drivers/char/drm/via_drm.h | 275
-rw-r--r--  drivers/char/hvc_xen.c | 61
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 118
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 23
-rw-r--r--  drivers/char/pcmcia/ipwireless/main.c | 1
-rw-r--r--  drivers/crypto/Kconfig | 26
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/hifn_795x.c | 367
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 1506
-rw-r--r--  drivers/crypto/padlock-aes.c | 4
-rw-r--r--  drivers/crypto/padlock-sha.c | 4
-rw-r--r--  drivers/crypto/talitos.c | 1597
-rw-r--r--  drivers/crypto/talitos.h | 199
-rw-r--r--  drivers/firmware/Kconfig | 10
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/dmi_scan.c | 5
-rw-r--r--  drivers/firmware/memmap.c | 205
-rw-r--r--  drivers/gpu/Makefile | 1
-rw-r--r--  drivers/gpu/drm/Kconfig (renamed from drivers/char/drm/Kconfig) | 0
-rw-r--r--  drivers/gpu/drm/Makefile | 26
-rw-r--r--  drivers/gpu/drm/README.drm (renamed from drivers/char/drm/README.drm) | 0
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c (renamed from drivers/char/drm/ati_pcigart.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c (renamed from drivers/char/drm/drm_agpsupport.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_auth.c (renamed from drivers/char/drm/drm_auth.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_bufs.c (renamed from drivers/char/drm/drm_bufs.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_context.c (renamed from drivers/char/drm/drm_context.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_dma.c (renamed from drivers/char/drm/drm_dma.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_drawable.c (renamed from drivers/char/drm/drm_drawable.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_drv.c (renamed from drivers/char/drm/drm_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_fops.c (renamed from drivers/char/drm/drm_fops.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c (renamed from drivers/char/drm/drm_hashtab.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c (renamed from drivers/char/drm/drm_ioc32.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c (renamed from drivers/char/drm/drm_ioctl.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_irq.c (renamed from drivers/char/drm/drm_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_lock.c (renamed from drivers/char/drm/drm_lock.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_memory.c (renamed from drivers/char/drm/drm_memory.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_mm.c (renamed from drivers/char/drm/drm_mm.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_pci.c (renamed from drivers/char/drm/drm_pci.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_proc.c (renamed from drivers/char/drm/drm_proc.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_scatter.c (renamed from drivers/char/drm/drm_scatter.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_sman.c (renamed from drivers/char/drm/drm_sman.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_stub.c (renamed from drivers/char/drm/drm_stub.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c (renamed from drivers/char/drm/drm_sysfs.c) | 0
-rw-r--r--  drivers/gpu/drm/drm_vm.c (renamed from drivers/char/drm/drm_vm.c) | 0
-rw-r--r--  drivers/gpu/drm/i810/Makefile | 8
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c (renamed from drivers/char/drm/i810_dma.c) | 0
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c (renamed from drivers/char/drm/i810_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h (renamed from drivers/char/drm/i810_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/i830/Makefile | 8
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c (renamed from drivers/char/drm/i830_dma.c) | 0
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.c (renamed from drivers/char/drm/i830_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.h (renamed from drivers/char/drm/i830_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/i830/i830_irq.c (renamed from drivers/char/drm/i830_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c (renamed from drivers/char/drm/i915_dma.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c (renamed from drivers/char/drm/i915_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h (renamed from drivers/char/drm/i915_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c (renamed from drivers/char/drm/i915_ioc32.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c (renamed from drivers/char/drm/i915_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/i915_mem.c (renamed from drivers/char/drm/i915_mem.c) | 0
-rw-r--r--  drivers/gpu/drm/mga/Makefile | 11
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c (renamed from drivers/char/drm/mga_dma.c) | 0
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c (renamed from drivers/char/drm/mga_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h (renamed from drivers/char/drm/mga_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/mga/mga_ioc32.c (renamed from drivers/char/drm/mga_ioc32.c) | 0
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c (renamed from drivers/char/drm/mga_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c (renamed from drivers/char/drm/mga_state.c) | 0
-rw-r--r--  drivers/gpu/drm/mga/mga_ucode.h (renamed from drivers/char/drm/mga_ucode.h) | 0
-rw-r--r--  drivers/gpu/drm/mga/mga_warp.c (renamed from drivers/char/drm/mga_warp.c) | 0
-rw-r--r--  drivers/gpu/drm/r128/Makefile | 10
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c (renamed from drivers/char/drm/r128_cce.c) | 0
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c (renamed from drivers/char/drm/r128_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h (renamed from drivers/char/drm/r128_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/r128/r128_ioc32.c (renamed from drivers/char/drm/r128_ioc32.c) | 0
-rw-r--r--  drivers/gpu/drm/r128/r128_irq.c (renamed from drivers/char/drm/r128_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c (renamed from drivers/char/drm/r128_state.c) | 0
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c (renamed from drivers/char/drm/r300_cmdbuf.c) | 0
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h (renamed from drivers/char/drm/r300_reg.h) | 0
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c (renamed from drivers/char/drm/radeon_cp.c) | 0
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c (renamed from drivers/char/drm/radeon_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h (renamed from drivers/char/drm/radeon_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ioc32.c (renamed from drivers/char/drm/radeon_ioc32.c) | 0
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c (renamed from drivers/char/drm/radeon_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mem.c (renamed from drivers/char/drm/radeon_mem.c) | 0
-rw-r--r--  drivers/gpu/drm/radeon/radeon_microcode.h (renamed from drivers/char/drm/radeon_microcode.h) | 0
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c (renamed from drivers/char/drm/radeon_state.c) | 0
-rw-r--r--  drivers/gpu/drm/savage/Makefile | 9
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c (renamed from drivers/char/drm/savage_bci.c) | 0
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c (renamed from drivers/char/drm/savage_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.h (renamed from drivers/char/drm/savage_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c (renamed from drivers/char/drm/savage_state.c) | 0
-rw-r--r--  drivers/gpu/drm/sis/Makefile | 10
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c (renamed from drivers/char/drm/sis_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h (renamed from drivers/char/drm/sis_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c (renamed from drivers/char/drm/sis_mm.c) | 0
-rw-r--r--  drivers/gpu/drm/tdfx/Makefile | 8
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c (renamed from drivers/char/drm/tdfx_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.h (renamed from drivers/char/drm/tdfx_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/via/Makefile | 8
-rw-r--r--  drivers/gpu/drm/via/via_3d_reg.h (renamed from drivers/char/drm/via_3d_reg.h) | 0
-rw-r--r--  drivers/gpu/drm/via/via_dma.c (renamed from drivers/char/drm/via_dma.c) | 0
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c (renamed from drivers/char/drm/via_dmablit.c) | 0
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.h (renamed from drivers/char/drm/via_dmablit.h) | 0
-rw-r--r--  drivers/gpu/drm/via/via_drv.c (renamed from drivers/char/drm/via_drv.c) | 0
-rw-r--r--  drivers/gpu/drm/via/via_drv.h (renamed from drivers/char/drm/via_drv.h) | 0
-rw-r--r--  drivers/gpu/drm/via/via_irq.c (renamed from drivers/char/drm/via_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/via/via_map.c (renamed from drivers/char/drm/via_map.c) | 0
-rw-r--r--  drivers/gpu/drm/via/via_mm.c (renamed from drivers/char/drm/via_mm.c) | 0
-rw-r--r--  drivers/gpu/drm/via/via_verifier.c (renamed from drivers/char/drm/via_verifier.c) | 0
-rw-r--r--  drivers/gpu/drm/via/via_verifier.h (renamed from drivers/char/drm/via_verifier.h) | 0
-rw-r--r--  drivers/gpu/drm/via/via_video.c (renamed from drivers/char/drm/via_video.c) | 0
-rw-r--r--  drivers/ide/legacy/ide-cs.c | 10
-rw-r--r--  drivers/input/xen-kbdfront.c | 20
-rw-r--r--  drivers/lguest/Kconfig | 2
-rw-r--r--  drivers/lguest/lg.h | 1
-rw-r--r--  drivers/md/linear.c | 10
-rw-r--r--  drivers/md/raid0.c | 10
-rw-r--r--  drivers/md/raid10.c | 15
-rw-r--r--  drivers/md/raid5.c | 10
-rw-r--r--  drivers/misc/atmel_pwm.c | 2
-rw-r--r--  drivers/mtd/ftl.c | 4
-rw-r--r--  drivers/mtd/maps/pcmciamtd.c | 9
-rw-r--r--  drivers/net/macb.c | 37
-rw-r--r--  drivers/net/xen-netfront.c | 4
-rw-r--r--  drivers/pci/intel-iommu.c | 51
-rw-r--r--  drivers/pcmcia/Kconfig | 7
-rw-r--r--  drivers/pcmcia/Makefile | 1
-rw-r--r--  drivers/pcmcia/au1000_generic.h | 27
-rw-r--r--  drivers/pcmcia/au1000_pb1x00.c | 1
-rw-r--r--  drivers/pcmcia/au1000_xxs1500.c | 1
-rw-r--r--  drivers/pcmcia/bfin_cf_pcmcia.c | 339
-rw-r--r--  drivers/pcmcia/cardbus.c | 2
-rw-r--r--  drivers/pcmcia/cistpl.c | 16
-rw-r--r--  drivers/pcmcia/cs.c | 14
-rw-r--r--  drivers/pcmcia/cs_internal.h | 13
-rw-r--r--  drivers/pcmcia/ds.c | 12
-rw-r--r--  drivers/pcmcia/hd64465_ss.c | 3
-rw-r--r--  drivers/pcmcia/i82092.c | 2
-rw-r--r--  drivers/pcmcia/i82092aa.h | 2
-rw-r--r--  drivers/pcmcia/i82365.c | 39
-rw-r--r--  drivers/pcmcia/m8xx_pcmcia.c | 3
-rw-r--r--  drivers/pcmcia/pcmcia_ioctl.c | 154
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 81
-rw-r--r--  drivers/pcmcia/pxa2xx_base.c | 1
-rw-r--r--  drivers/pcmcia/rsrc_mgr.c | 86
-rw-r--r--  drivers/pcmcia/rsrc_nonstatic.c | 57
-rw-r--r--  drivers/pcmcia/soc_common.h | 1
-rw-r--r--  drivers/pcmcia/socket_sysfs.c | 8
-rw-r--r--  drivers/pcmcia/ti113x.h | 4
-rw-r--r--  drivers/rtc/rtc-at32ap700x.c | 3
-rw-r--r--  drivers/s390/block/dasd.c | 18
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 15
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 12
-rw-r--r--  drivers/s390/block/dasd_fba.c | 12
-rw-r--r--  drivers/s390/block/dcssblk.c | 22
-rw-r--r--  drivers/s390/block/xpram.c | 18
-rw-r--r--  drivers/s390/char/con3215.c | 38
-rw-r--r--  drivers/s390/char/con3270.c | 6
-rw-r--r--  drivers/s390/char/fs3270.c | 11
-rw-r--r--  drivers/s390/char/monreader.c | 74
-rw-r--r--  drivers/s390/char/raw3270.c | 28
-rw-r--r--  drivers/s390/char/sclp.c | 12
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 343
-rw-r--r--  drivers/s390/char/sclp_con.c | 5
-rw-r--r--  drivers/s390/char/sclp_config.c | 17
-rw-r--r--  drivers/s390/char/sclp_cpi_sys.c | 57
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 8
-rw-r--r--  drivers/s390/char/sclp_rw.c | 2
-rw-r--r--  drivers/s390/char/sclp_sdias.c | 4
-rw-r--r--  drivers/s390/char/sclp_tty.c | 261
-rw-r--r--  drivers/s390/char/sclp_tty.h | 53
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 62
-rw-r--r--  drivers/s390/char/tape_34xx.c | 12
-rw-r--r--  drivers/s390/char/tape_3590.c | 21
-rw-r--r--  drivers/s390/char/tape_core.c | 15
-rw-r--r--  drivers/s390/char/tty3270.c | 9
-rw-r--r--  drivers/s390/char/vmcp.c | 37
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 29
-rw-r--r--  drivers/s390/char/vmur.c | 5
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 16
-rw-r--r--  drivers/s390/char/zcore.c | 31
-rw-r--r--  drivers/s390/cio/Makefile | 4
-rw-r--r--  drivers/s390/cio/airq.c | 45
-rw-r--r--  drivers/s390/cio/chp.c | 116
-rw-r--r--  drivers/s390/cio/chp.h | 15
-rw-r--r--  drivers/s390/cio/chsc.c | 379
-rw-r--r--  drivers/s390/cio/chsc.h | 26
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 820
-rw-r--r--  drivers/s390/cio/chsc_sch.h | 13
-rw-r--r--  drivers/s390/cio/cio.c | 282
-rw-r--r--  drivers/s390/cio/cio.h | 14
-rw-r--r--  drivers/s390/cio/cmf.c | 20
-rw-r--r--  drivers/s390/cio/css.c | 283
-rw-r--r--  drivers/s390/cio/css.h | 49
-rw-r--r--  drivers/s390/cio/device.c | 476
-rw-r--r--  drivers/s390/cio/device.h | 7
-rw-r--r--  drivers/s390/cio/device_fsm.c | 210
-rw-r--r--  drivers/s390/cio/device_id.c | 16
-rw-r--r--  drivers/s390/cio/device_ops.c | 134
-rw-r--r--  drivers/s390/cio/device_pgid.c | 26
-rw-r--r--  drivers/s390/cio/device_status.c | 133
-rw-r--r--  drivers/s390/cio/fcx.c | 350
-rw-r--r--  drivers/s390/cio/idset.h | 2
-rw-r--r--  drivers/s390/cio/io_sch.h | 48
-rw-r--r--  drivers/s390/cio/ioasm.h | 2
-rw-r--r--  drivers/s390/cio/isc.c | 68
-rw-r--r--  drivers/s390/cio/itcw.c | 327
-rw-r--r--  drivers/s390/cio/qdio.c | 35
-rw-r--r--  drivers/s390/cio/qdio.h | 6
-rw-r--r--  drivers/s390/cio/schid.h | 26
-rw-r--r--  drivers/s390/cio/scsw.c | 843
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 63
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 24
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 28
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c | 3
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 15
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 69
-rw-r--r--  drivers/s390/net/claw.c | 77
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 12
-rw-r--r--  drivers/s390/net/ctcm_main.c | 28
-rw-r--r--  drivers/s390/net/cu3088.c | 2
-rw-r--r--  drivers/s390/net/cu3088.h | 3
-rw-r--r--  drivers/s390/net/lcs.c | 44
-rw-r--r--  drivers/s390/net/netiucv.c | 61
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 15
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 9
-rw-r--r--  drivers/s390/net/smsgiucv.c | 10
-rw-r--r--  drivers/s390/s390mach.c | 106
-rw-r--r--  drivers/s390/s390mach.h | 10
-rw-r--r--  drivers/scsi/sg.c | 44
-rw-r--r--  drivers/scsi/sr.c | 20
-rw-r--r--  drivers/serial/atmel_serial.c | 17
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/video/sgivwfb.c | 3
-rw-r--r--  drivers/video/xen-fbfront.c | 211
-rw-r--r--  drivers/xen/Makefile | 2
-rw-r--r--  drivers/xen/balloon.c | 10
-rw-r--r--  drivers/xen/events.c | 114
-rw-r--r--  drivers/xen/grant-table.c | 4
-rw-r--r--  drivers/xen/manage.c | 252
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 2
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c | 23
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 10
279 files changed, 10240 insertions(+), 9146 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index f65deda72d61..fda44679dffc 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/
 obj-$(CONFIG_PARISC)		+= parisc/
 obj-$(CONFIG_RAPIDIO)		+= rapidio/
 obj-y				+= video/
+obj-y				+= gpu/
 obj-$(CONFIG_ACPI)		+= acpi/
 # PnP must come after ACPI since it will eventually need to check if acpi
 # was used and do nothing if so
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c52fca833268..bba867391a85 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -4,8 +4,6 @@
 
 menuconfig ACPI
 	bool "ACPI (Advanced Configuration and Power Interface) Support"
-	depends on !X86_NUMAQ
-	depends on !X86_VISWS
 	depends on !IA64_HP_SIM
 	depends on IA64 || X86
 	depends on PCI
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 57a43649a461..499ccc628d81 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -885,7 +885,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 	/* set the min alignment and padding */
 	blk_queue_update_dma_alignment(sdev->request_queue,
 				       ATA_DMA_PAD_SZ - 1);
-	blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1);
+	blk_queue_update_dma_pad(sdev->request_queue,
+				 ATA_DMA_PAD_SZ - 1);
 
 	/* configure draining */
 	buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 2b4b392dcbc1..87a7f1d02578 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -153,7 +153,7 @@ EXPORT_SYMBOL(set_trace_device);
  * it's not any guarantee, but it's a high _likelihood_ that
  * the match is valid).
  */
-void generate_resume_trace(void *tracedata, unsigned int user)
+void generate_resume_trace(const void *tracedata, unsigned int user)
 {
 	unsigned short lineno = *(unsigned short *)tracedata;
 	const char *file = *(const char **)(tracedata + 2);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index fdf4044d2e74..1efe162e16d7 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -40,6 +40,7 @@ static ssize_t show_##name(struct sys_device *dev, char *buf) \
 	return sprintf(buf, "%d\n", topology_##name(cpu));	\
 }
 
+#if defined(topology_thread_siblings) || defined(topology_core_siblings)
 static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
 {
 	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
@@ -54,21 +55,41 @@ static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
 	}
 	return n;
 }
+#endif
 
+#ifdef arch_provides_topology_pointers
 #define define_siblings_show_map(name)	\
-static inline ssize_t show_##name(struct sys_device *dev, char *buf)	\
+static ssize_t show_##name(struct sys_device *dev, char *buf)	\
 {	\
 	unsigned int cpu = dev->id;	\
 	return show_cpumap(0, &(topology_##name(cpu)), buf);	\
 }
 
 #define define_siblings_show_list(name)	\
-static inline ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
+static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
 {	\
 	unsigned int cpu = dev->id;	\
 	return show_cpumap(1, &(topology_##name(cpu)), buf);	\
 }
 
+#else
+#define define_siblings_show_map(name)	\
+static ssize_t show_##name(struct sys_device *dev, char *buf)	\
+{	\
+	unsigned int cpu = dev->id;	\
+	cpumask_t mask = topology_##name(cpu);	\
+	return show_cpumap(0, &mask, buf);	\
+}
+
+#define define_siblings_show_list(name)	\
+static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
+{	\
+	unsigned int cpu = dev->id;	\
+	cpumask_t mask = topology_##name(cpu);	\
+	return show_cpumap(1, &mask, buf);	\
+}
+#endif
+
 #define define_siblings_show_func(name)	\
 	define_siblings_show_map(name); define_siblings_show_list(name)
 
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index cd03473f3547..a002a381df92 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6628,15 +6628,18 @@ static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
  * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
 */
 
-static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
-			    unsigned int Request, unsigned long Argument)
+static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
+			     unsigned long Argument)
 {
-  int ErrorCode = 0;
+  long ErrorCode = 0;
   if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+
+  lock_kernel();
   switch (Request)
     {
     case DAC960_IOCTL_GET_CONTROLLER_COUNT:
-      return DAC960_ControllerCount;
+      ErrorCode = DAC960_ControllerCount;
+      break;
     case DAC960_IOCTL_GET_CONTROLLER_INFO:
       {
 	DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
@@ -6644,15 +6647,20 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 	DAC960_ControllerInfo_T ControllerInfo;
 	DAC960_Controller_T *Controller;
 	int ControllerNumber;
-	if (UserSpaceControllerInfo == NULL) return -EINVAL;
-	ErrorCode = get_user(ControllerNumber,
+	if (UserSpaceControllerInfo == NULL)
+		ErrorCode = -EINVAL;
+	else ErrorCode = get_user(ControllerNumber,
 			     &UserSpaceControllerInfo->ControllerNumber);
-	if (ErrorCode != 0) return ErrorCode;
+	if (ErrorCode != 0)
+		break;;
+	ErrorCode = -ENXIO;
 	if (ControllerNumber < 0 ||
-	    ControllerNumber > DAC960_ControllerCount - 1)
-	  return -ENXIO;
+	    ControllerNumber > DAC960_ControllerCount - 1) {
+		break;
+	}
 	Controller = DAC960_Controllers[ControllerNumber];
-	if (Controller == NULL) return -ENXIO;
+	if (Controller == NULL)
+		break;;
 	memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
 	ControllerInfo.ControllerNumber = ControllerNumber;
 	ControllerInfo.FirmwareType = Controller->FirmwareType;
@@ -6665,8 +6673,9 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 	ControllerInfo.PCI_Address = Controller->PCI_Address;
 	strcpy(ControllerInfo.ModelName, Controller->ModelName);
 	strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
-	return (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
+	ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
 			     sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
+	break;
       }
     case DAC960_IOCTL_V1_EXECUTE_COMMAND:
       {
@@ -6684,30 +6693,39 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 	int ControllerNumber, DataTransferLength;
 	unsigned char *DataTransferBuffer = NULL;
 	dma_addr_t DataTransferBufferDMA;
-	if (UserSpaceUserCommand == NULL) return -EINVAL;
+	if (UserSpaceUserCommand == NULL) {
+		ErrorCode = -EINVAL;
+		break;
+	}
 	if (copy_from_user(&UserCommand, UserSpaceUserCommand,
 			   sizeof(DAC960_V1_UserCommand_T))) {
 		ErrorCode = -EFAULT;
-		goto Failure1a;
+		break;
 	}
 	ControllerNumber = UserCommand.ControllerNumber;
+	ErrorCode = -ENXIO;
 	if (ControllerNumber < 0 ||
 	    ControllerNumber > DAC960_ControllerCount - 1)
-	  return -ENXIO;
+		break;
 	Controller = DAC960_Controllers[ControllerNumber];
-	if (Controller == NULL) return -ENXIO;
-	if (Controller->FirmwareType != DAC960_V1_Controller) return -EINVAL;
+	if (Controller == NULL)
+		break;
+	ErrorCode = -EINVAL;
+	if (Controller->FirmwareType != DAC960_V1_Controller)
+		break;
 	CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
 	DataTransferLength = UserCommand.DataTransferLength;
-	if (CommandOpcode & 0x80) return -EINVAL;
+	if (CommandOpcode & 0x80)
+		break;
 	if (CommandOpcode == DAC960_V1_DCDB)
 	  {
 	    if (copy_from_user(&DCDB, UserCommand.DCDB,
 			       sizeof(DAC960_V1_DCDB_T))) {
 		ErrorCode = -EFAULT;
-		goto Failure1a;
+		break;
 	    }
-	    if (DCDB.Channel >= DAC960_V1_MaxChannels) return -EINVAL;
+	    if (DCDB.Channel >= DAC960_V1_MaxChannels)
+		break;
 	    if (!((DataTransferLength == 0 &&
 		   DCDB.Direction
 		   == DAC960_V1_DCDB_NoDataTransfer) ||
@@ -6717,38 +6735,37 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 		  (DataTransferLength < 0 &&
 		   DCDB.Direction
 		   == DAC960_V1_DCDB_DataTransferSystemToDevice)))
-	      return -EINVAL;
+		break;
 	    if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
 		!= abs(DataTransferLength))
-	      return -EINVAL;
+		break;
 	    DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
 			sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
-	    if (DCDB_IOBUF == NULL)
-	      return -ENOMEM;
+	    if (DCDB_IOBUF == NULL) {
+		ErrorCode = -ENOMEM;
+		break;
+	    }
 	  }
+	ErrorCode = -ENOMEM;
 	if (DataTransferLength > 0)
 	  {
 	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
			DataTransferLength, &DataTransferBufferDMA);
-	    if (DataTransferBuffer == NULL) {
-		ErrorCode = -ENOMEM;
-		goto Failure1;
-	    }
+	    if (DataTransferBuffer == NULL)
+		break;
 	    memset(DataTransferBuffer, 0, DataTransferLength);
 	  }
 	else if (DataTransferLength < 0)
 	  {
 	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
			-DataTransferLength, &DataTransferBufferDMA);
-	    if (DataTransferBuffer == NULL) {
-		ErrorCode = -ENOMEM;
-		goto Failure1;
-	    }
+	    if (DataTransferBuffer == NULL)
+		break;
 	    if (copy_from_user(DataTransferBuffer,
			       UserCommand.DataTransferBuffer,
			       -DataTransferLength)) {
		ErrorCode = -EFAULT;
-		goto Failure1;
+		break;
	    }
	  }
	if (CommandOpcode == DAC960_V1_DCDB)
@@ -6825,8 +6842,7 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 	if (DCDB_IOBUF != NULL)
 	  pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
			      DCDB_IOBUF, DCDB_IOBUFDMA);
-      Failure1a:
-      return ErrorCode;
+      break;
       }
     case DAC960_IOCTL_V2_EXECUTE_COMMAND:
       {
@@ -6844,32 +6860,43 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 	dma_addr_t DataTransferBufferDMA;
 	unsigned char *RequestSenseBuffer = NULL;
 	dma_addr_t RequestSenseBufferDMA;
-	if (UserSpaceUserCommand == NULL) return -EINVAL;
+
+	ErrorCode = -EINVAL;
+	if (UserSpaceUserCommand == NULL)
+		break;
 	if (copy_from_user(&UserCommand, UserSpaceUserCommand,
 			   sizeof(DAC960_V2_UserCommand_T))) {
 		ErrorCode = -EFAULT;
-		goto Failure2a;
+		break;
 	}
+	ErrorCode = -ENXIO;
 	ControllerNumber = UserCommand.ControllerNumber;
 	if (ControllerNumber < 0 ||
 	    ControllerNumber > DAC960_ControllerCount - 1)
-	  return -ENXIO;
+		break;
 	Controller = DAC960_Controllers[ControllerNumber];
-	if (Controller == NULL) return -ENXIO;
-	if (Controller->FirmwareType != DAC960_V2_Controller) return -EINVAL;
+	if (Controller == NULL)
+		break;
+	if (Controller->FirmwareType != DAC960_V2_Controller){
+		ErrorCode = -EINVAL;
+		break;
+	}
 	DataTransferLength = UserCommand.DataTransferLength;
+	ErrorCode = -ENOMEM;
 	if (DataTransferLength > 0)
 	  {
 	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
			DataTransferLength, &DataTransferBufferDMA);
-	    if (DataTransferBuffer == NULL) return -ENOMEM;
+	    if (DataTransferBuffer == NULL)
+		break;
 	    memset(DataTransferBuffer, 0, DataTransferLength);
 	  }
 	else if (DataTransferLength < 0)
 	  {
 	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
			-DataTransferLength, &DataTransferBufferDMA);
-	    if (DataTransferBuffer == NULL) return -ENOMEM;
+	    if (DataTransferBuffer == NULL)
+		break;
 	    if (copy_from_user(DataTransferBuffer,
			       UserCommand.DataTransferBuffer,
			       -DataTransferLength)) {
@@ -6979,8 +7006,7 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 	if (RequestSenseBuffer != NULL)
 	  pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
			      RequestSenseBuffer, RequestSenseBufferDMA);
-      Failure2a:
-      return ErrorCode;
+      break;
       }
     case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
      {
@@ -6990,21 +7016,33 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 	DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
 	DAC960_Controller_T *Controller;
 	int ControllerNumber;
-	if (UserSpaceGetHealthStatus == NULL) return -EINVAL;
+	if (UserSpaceGetHealthStatus == NULL) {
+		ErrorCode = -EINVAL;
+		break;
+	}
 	if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
-			   sizeof(DAC960_V2_GetHealthStatus_T)))
-	  return -EFAULT;
+			   sizeof(DAC960_V2_GetHealthStatus_T))) {
+		ErrorCode = -EFAULT;
+		break;
+	}
+	ErrorCode = -ENXIO;
 	ControllerNumber = GetHealthStatus.ControllerNumber;
 	if (ControllerNumber < 0 ||
 	    ControllerNumber > DAC960_ControllerCount - 1)
-	  return -ENXIO;
+		break;
 	Controller = DAC960_Controllers[ControllerNumber];
-	if (Controller == NULL) return -ENXIO;
-	if (Controller->FirmwareType != DAC960_V2_Controller) return -EINVAL;
+	if (Controller == NULL)
+		break;
+	if (Controller->FirmwareType != DAC960_V2_Controller) {
+		ErrorCode = -EINVAL;
+		break;
+	}
 	if (copy_from_user(&HealthStatusBuffer,
 			   GetHealthStatus.HealthStatusBuffer,
-			   sizeof(DAC960_V2_HealthStatusBuffer_T)))
-	  return -EFAULT;
+			   sizeof(DAC960_V2_HealthStatusBuffer_T))) {
+		ErrorCode = -EFAULT;
+		break;
+	}
 	while (Controller->V2.HealthStatusBuffer->StatusChangeCounter
 	       == HealthStatusBuffer.StatusChangeCounter &&
 	       Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
@@ -7012,21 +7050,28 @@ static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
 	  {
 	    interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue,
					   DAC960_MonitoringTimerInterval);
-	    if (signal_pending(current)) return -EINTR;
+	    if (signal_pending(current)) {
+		ErrorCode = -EINTR;
+		break;
+	    }
 	  }
 	if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
 			 Controller->V2.HealthStatusBuffer,
 			 sizeof(DAC960_V2_HealthStatusBuffer_T)))
-	  return -EFAULT;
-	return 0;
+		ErrorCode = -EFAULT;
+	else
+		ErrorCode = 0;
       }
+    default:
+	ErrorCode = -ENOTTY;
     }
-  return -EINVAL;
+  unlock_kernel();
+  return ErrorCode;
 }
 
 static const struct file_operations DAC960_gam_fops = {
 	.owner		= THIS_MODULE,
-	.ioctl		= DAC960_gam_ioctl
+	.unlocked_ioctl	= DAC960_gam_ioctl
 };
 
 static struct miscdevice DAC960_gam_dev = {
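
The DAC960 hunks above are one instance of the tree-wide BKL pushdown: the legacy .ioctl file operation, which the VFS called with the Big Kernel Lock already held, becomes .unlocked_ioctl; the handler returns long, loses the inode argument, takes lock_kernel()/unlock_kernel() itself, and turns the early returns inside the switch into breaks so the lock is released on a single exit path. A minimal sketch of the same shape, using a hypothetical foo driver rather than anything from this patch:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */
#include <linux/uaccess.h>

#define FOO_GET_COUNT	_IOR('f', 0x01, int)	/* hypothetical command */

static int foo_count;				/* hypothetical driver state */

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;

	lock_kernel();			/* what the old .ioctl entry point did for us */
	switch (cmd) {
	case FOO_GET_COUNT:
		ret = put_user(foo_count, (int __user *)arg);
		break;
	default:
		ret = -ENOTTY;		/* unknown ioctl: -ENOTTY, not -EINVAL */
	}
	unlock_kernel();		/* single unlock on the only exit path */
	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,	/* note: long return, no inode parameter */
};

The same conversion repeats below for pt.c and pktcdvd.c.
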
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 41f818be2f7e..2f1746295d06 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1003,7 +1003,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
 	 * Enough people have their dip switches set backwards to
 	 * warrant a loud message for this special case.
 	 */
-	aoemajor = be16_to_cpu(get_unaligned(&h->major));
+	aoemajor = get_unaligned_be16(&h->major);
 	if (aoemajor == 0xfff) {
 		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
 			"Check shelf dip switches.\n");
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 314333db16ee..5c74c3574a5a 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -190,8 +190,7 @@ module_param_array(drive3, int, NULL, 0);
 #define ATAPI_LOG_SENSE		0x4d
 
 static int pt_open(struct inode *inode, struct file *file);
-static int pt_ioctl(struct inode *inode, struct file *file,
-		    unsigned int cmd, unsigned long arg);
+static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 static int pt_release(struct inode *inode, struct file *file);
 static ssize_t pt_read(struct file *filp, char __user *buf,
 		       size_t count, loff_t * ppos);
@@ -237,7 +236,7 @@ static const struct file_operations pt_fops = {
 	.owner = THIS_MODULE,
 	.read = pt_read,
 	.write = pt_write,
-	.ioctl = pt_ioctl,
+	.unlocked_ioctl = pt_ioctl,
 	.open = pt_open,
 	.release = pt_release,
 };
@@ -691,8 +690,7 @@ out:
 	return err;
 }
 
-static int pt_ioctl(struct inode *inode, struct file *file,
-		    unsigned int cmd, unsigned long arg)
+static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct pt_unit *tape = file->private_data;
 	struct mtop __user *p = (void __user *)arg;
@@ -706,23 +704,26 @@ static int pt_ioctl(struct inode *inode, struct file *file,
 		switch (mtop.mt_op) {
 
 		case MTREW:
+			lock_kernel();
 			pt_rewind(tape);
+			unlock_kernel();
 			return 0;
 
 		case MTWEOF:
+			lock_kernel();
 			pt_write_fm(tape);
+			unlock_kernel();
 			return 0;
 
 		default:
-			printk("%s: Unimplemented mt_op %d\n", tape->name,
+			/* FIXME: rate limit ?? */
+			printk(KERN_DEBUG "%s: Unimplemented mt_op %d\n", tape->name,
 			       mtop.mt_op);
 			return -EINVAL;
 		}
 
 	default:
-		printk("%s: Unimplemented ioctl 0x%x\n", tape->name, cmd);
-		return -EINVAL;
-
+		return -ENOTTY;
 	}
 }
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3ba1df93e9e3..45bee918c46a 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -49,6 +49,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
+#include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/spinlock.h>
 #include <linux/file.h>
@@ -2079,7 +2080,6 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
 	unsigned char buf[64];
 	int ret;
 
-	memset(buf, 0, sizeof(buf));
 	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
 	cgc.sense = &sense;
 	cgc.buflen = pd->mode_offset + 12;
@@ -2126,7 +2126,6 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
 	unsigned char *cap_buf;
 	int ret, offset;
 
-	memset(buf, 0, sizeof(buf));
 	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
 	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
 	cgc.sense = &sense;
@@ -2633,11 +2632,12 @@ end_io:
 
 
 
-static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
+static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
+			  struct bio_vec *bvec)
 {
 	struct pktcdvd_device *pd = q->queuedata;
-	sector_t zone = ZONE(bio->bi_sector, pd);
-	int used = ((bio->bi_sector - zone) << 9) + bio->bi_size;
+	sector_t zone = ZONE(bmd->bi_sector, pd);
+	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
 	int remaining = (pd->settings.size << 9) - used;
 	int remaining2;
 
@@ -2645,7 +2645,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_v
 	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
 	 * boundary, pkt_make_request() will split the bio.
 	 */
-	remaining2 = PAGE_SIZE - bio->bi_size;
+	remaining2 = PAGE_SIZE - bmd->bi_size;
 	remaining = max(remaining, remaining2);
 
 	BUG_ON(remaining < 0);
@@ -2796,9 +2796,14 @@ out_mem:
 	return ret;
 }
 
-static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct pktcdvd_device *pd;
+	long ret;
+
+	lock_kernel();
+	pd = inode->i_bdev->bd_disk->private_data;
 
 	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
 
@@ -2811,7 +2816,8 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
 	case CDROM_LAST_WRITTEN:
 	case CDROM_SEND_PACKET:
 	case SCSI_IOCTL_SEND_COMMAND:
-		return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
+		ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
+		break;
 
 	case CDROMEJECT:
 		/*
@@ -2820,14 +2826,15 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
 		 */
 		if (pd->refcnt == 1)
 			pkt_lock_door(pd, 0);
-		return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
+		ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
+		break;
 
 	default:
 		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
-		return -ENOTTY;
+		ret = -ENOTTY;
 	}
-
-	return 0;
+	unlock_kernel();
+	return ret;
 }
 
 static int pkt_media_changed(struct gendisk *disk)
@@ -2849,7 +2856,7 @@ static struct block_device_operations pktcdvd_ops = {
 	.owner =		THIS_MODULE,
 	.open =			pkt_open,
 	.release =		pkt_close,
-	.ioctl =		pkt_ioctl,
+	.unlocked_ioctl =	pkt_ioctl,
 	.media_changed =	pkt_media_changed,
 };
 
@@ -3014,7 +3021,8 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
 	mutex_unlock(&ctl_mutex);
 }
 
-static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
 	struct pkt_ctrl_command ctrl_cmd;
@@ -3031,16 +3039,22 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
 	case PKT_CTRL_CMD_SETUP:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
+		lock_kernel();
 		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
 		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
+		unlock_kernel();
 		break;
 	case PKT_CTRL_CMD_TEARDOWN:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
+		lock_kernel();
 		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
+		unlock_kernel();
 		break;
 	case PKT_CTRL_CMD_STATUS:
+		lock_kernel();
 		pkt_get_status(&ctrl_cmd);
+		unlock_kernel();
 		break;
 	default:
 		return -ENOTTY;
@@ -3053,7 +3067,7 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
 
 
 static const struct file_operations pkt_ctl_fops = {
-	.ioctl	 = pkt_ctl_ioctl,
+	.unlocked_ioctl	 = pkt_ctl_ioctl,
 	.owner	 = THIS_MODULE,
 };
 
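
pkt_merge_bvec() above is adapted to the reworked merge_bvec_fn prototype: the block layer now hands the callback a struct bvec_merge_data describing the proposed request (the hunks use its bi_sector and bi_size fields) instead of the struct bio being built. A sketch of a callback written against that signature; the zone size here is invented for the example:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/*
 * Report how many bytes of @bvec may be appended to the request
 * described by @bmd; anything less than bvec->bv_len refuses the merge.
 */
static int example_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bmd,
			      struct bio_vec *bvec)
{
	sector_t zone_sectors = 64;			/* invented 32 KiB zone */
	sector_t offset = bmd->bi_sector & (zone_sectors - 1);
	int remaining = ((zone_sectors - offset) << 9) - bmd->bi_size;

	if (remaining < 0)
		remaining = 0;
	return min_t(int, remaining, bvec->bv_len);
}

A driver hooks such a callback up with blk_queue_merge_bvec(), the same registration pktcdvd performs elsewhere in this file.
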
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index f2fff5799ddf..9ae05c584234 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -38,6 +38,7 @@
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
+#include <linux/cdrom.h>
 #include <linux/module.h>
 
 #include <xen/xenbus.h>
@@ -153,6 +154,40 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
 	return 0;
 }
 
+int blkif_ioctl(struct inode *inode, struct file *filep,
+		unsigned command, unsigned long argument)
+{
+	struct blkfront_info *info =
+		inode->i_bdev->bd_disk->private_data;
+	int i;
+
+	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
+		command, (long)argument);
+
+	switch (command) {
+	case CDROMMULTISESSION:
+		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
+		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
+			if (put_user(0, (char __user *)(argument + i)))
+				return -EFAULT;
+		return 0;
+
+	case CDROM_GET_CAPABILITY: {
+		struct gendisk *gd = info->gd;
+		if (gd->flags & GENHD_FL_CD)
+			return 0;
+		return -EINVAL;
+	}
+
+	default:
+		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
+		  command);*/
+		return -EINVAL; /* same return as native Linux */
+	}
+
+	return 0;
+}
+
 /*
  * blkif_queue_request
  *
@@ -324,6 +359,9 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 	/* Make sure buffer addresses are sector-aligned. */
 	blk_queue_dma_alignment(rq, 511);
 
+	/* Make sure we don't use bounce buffers. */
+	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+
 	gd->queue = rq;
 
 	return 0;
@@ -546,7 +584,7 @@ static int setup_blkring(struct xenbus_device *dev,
 
 	info->ring_ref = GRANT_INVALID_REF;
 
-	sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
+	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 	if (!sring) {
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
 		return -ENOMEM;
@@ -703,7 +741,8 @@ static int blkif_recover(struct blkfront_info *info)
 	int j;
 
 	/* Stage 1: Make a safe copy of the shadow state. */
-	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
+	copy = kmalloc(sizeof(info->shadow),
+		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
 	if (!copy)
 		return -ENOMEM;
 	memcpy(copy, info->shadow, sizeof(info->shadow));
@@ -959,7 +998,7 @@ static int blkif_release(struct inode *inode, struct file *filep)
 		struct xenbus_device *dev = info->xbdev;
 		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
 
-		if (state == XenbusStateClosing)
+		if (state == XenbusStateClosing && info->is_ready)
 			blkfront_closing(dev);
 	}
 	return 0;
@@ -971,6 +1010,7 @@ static struct block_device_operations xlvbd_block_fops =
 	.open = blkif_open,
 	.release = blkif_release,
 	.getgeo = blkif_getgeo,
+	.ioctl = blkif_ioctl,
 };
 
 
@@ -1006,7 +1046,7 @@
 module_init(xlblk_init);
 
 
-static void xlblk_exit(void)
+static void __exit xlblk_exit(void)
 {
 	return xenbus_unregister_driver(&blkfront);
 }
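
Both xen-blkfront allocation changes apply one rule: memory allocated while the device is suspended or mid-resume must not use GFP_KERNEL, because direct reclaim could try to write dirty pages through the very block device that is not yet working again, deadlocking the resume. GFP_NOIO forbids the allocator from initiating I/O, __GFP_HIGH lets it dip into emergency reserves, and __GFP_REPEAT asks it to keep retrying. In shorthand (sketch; the helper name is invented):

#include <linux/slab.h>

/* runs on the suspend/resume path, before the device can do I/O again */
static void *alloc_on_resume_path(size_t len)
{
	/*
	 * GFP_NOIO: reclaim must not issue I/O -- we are the I/O path.
	 * __GFP_HIGH: may tap emergency reserves so resume makes progress.
	 */
	return kmalloc(len, GFP_NOIO | __GFP_HIGH);
}
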
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 69f26eb6415b..a5da35632651 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -461,37 +461,27 @@ int cdrom_get_media_event(struct cdrom_device_info *cdi,
461 struct media_event_desc *med) 461 struct media_event_desc *med)
462{ 462{
463 struct packet_command cgc; 463 struct packet_command cgc;
464 unsigned char *buffer; 464 unsigned char buffer[8];
465 struct event_header *eh; 465 struct event_header *eh = (struct event_header *) buffer;
466 int ret = 1;
467
468 buffer = kmalloc(8, GFP_KERNEL);
469 if (!buffer)
470 return -ENOMEM;
471 466
472 eh = (struct event_header *)buffer; 467 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
473
474 init_cdrom_command(&cgc, buffer, 8, CGC_DATA_READ);
475 cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION; 468 cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION;
476 cgc.cmd[1] = 1; /* IMMED */ 469 cgc.cmd[1] = 1; /* IMMED */
477 cgc.cmd[4] = 1 << 4; /* media event */ 470 cgc.cmd[4] = 1 << 4; /* media event */
478 cgc.cmd[8] = 8; 471 cgc.cmd[8] = sizeof(buffer);
479 cgc.quiet = 1; 472 cgc.quiet = 1;
480 473
481 if (cdi->ops->generic_packet(cdi, &cgc)) 474 if (cdi->ops->generic_packet(cdi, &cgc))
482 goto err; 475 return 1;
483 476
484 if (be16_to_cpu(eh->data_len) < sizeof(*med)) 477 if (be16_to_cpu(eh->data_len) < sizeof(*med))
485 goto err; 478 return 1;
486 479
487 if (eh->nea || eh->notification_class != 0x4) 480 if (eh->nea || eh->notification_class != 0x4)
488 goto err; 481 return 1;
489 482
490 memcpy(med, buffer + sizeof(*eh), sizeof(*med)); 483 memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
491 ret = 0; 484 return 0;
492err:
493 kfree(buffer);
494 return ret;
495} 485}
496 486
497/* 487/*
@@ -501,82 +491,68 @@ err:
501static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi) 491static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi)
502{ 492{
503 struct packet_command cgc; 493 struct packet_command cgc;
504 char *buffer; 494 char buffer[16];
505 int ret = 1;
506
507 buffer = kmalloc(16, GFP_KERNEL);
508 if (!buffer)
509 return -ENOMEM;
510 495
511 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); 496 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
512 497
513 cgc.timeout = HZ; 498 cgc.timeout = HZ;
514 cgc.quiet = 1; 499 cgc.quiet = 1;
515 500
516 if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) { 501 if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) {
517 cdi->mrw_mode_page = MRW_MODE_PC; 502 cdi->mrw_mode_page = MRW_MODE_PC;
518 ret = 0; 503 return 0;
519 } else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) { 504 } else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) {
520 cdi->mrw_mode_page = MRW_MODE_PC_PRE1; 505 cdi->mrw_mode_page = MRW_MODE_PC_PRE1;
521 ret = 0; 506 return 0;
522 } 507 }
523 kfree(buffer); 508
524 return ret; 509 return 1;
525} 510}
526 511
527static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write) 512static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write)
528{ 513{
529 struct packet_command cgc; 514 struct packet_command cgc;
530 struct mrw_feature_desc *mfd; 515 struct mrw_feature_desc *mfd;
531 unsigned char *buffer; 516 unsigned char buffer[16];
532 int ret; 517 int ret;
533 518
534 *write = 0; 519 *write = 0;
535 buffer = kmalloc(16, GFP_KERNEL);
536 if (!buffer)
537 return -ENOMEM;
538 520
539 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); 521 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
540 522
541 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; 523 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
542 cgc.cmd[3] = CDF_MRW; 524 cgc.cmd[3] = CDF_MRW;
543 cgc.cmd[8] = 16; 525 cgc.cmd[8] = sizeof(buffer);
544 cgc.quiet = 1; 526 cgc.quiet = 1;
545 527
546 if ((ret = cdi->ops->generic_packet(cdi, &cgc))) 528 if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
547 goto err; 529 return ret;
548 530
549 mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)]; 531 mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)];
550 if (be16_to_cpu(mfd->feature_code) != CDF_MRW) { 532 if (be16_to_cpu(mfd->feature_code) != CDF_MRW)
551 ret = 1; 533 return 1;
552 goto err;
553 }
554 *write = mfd->write; 534 *write = mfd->write;
555 535
556 if ((ret = cdrom_mrw_probe_pc(cdi))) { 536 if ((ret = cdrom_mrw_probe_pc(cdi))) {
557 *write = 0; 537 *write = 0;
538 return ret;
558 } 539 }
559err: 540
560 kfree(buffer); 541 return 0;
561 return ret;
562} 542}
563 543
564static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont) 544static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
565{ 545{
566 struct packet_command cgc; 546 struct packet_command cgc;
567 unsigned char *buffer; 547 unsigned char buffer[12];
568 int ret; 548 int ret;
569 549
570 printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : ""); 550 printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : "");
571 551
572 buffer = kmalloc(12, GFP_KERNEL);
573 if (!buffer)
574 return -ENOMEM;
575
576 /* 552 /*
577 * FmtData bit set (bit 4), format type is 1 553 * FmtData bit set (bit 4), format type is 1
578 */ 554 */
579 init_cdrom_command(&cgc, buffer, 12, CGC_DATA_WRITE); 555 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_WRITE);
580 cgc.cmd[0] = GPCMD_FORMAT_UNIT; 556 cgc.cmd[0] = GPCMD_FORMAT_UNIT;
581 cgc.cmd[1] = (1 << 4) | 1; 557 cgc.cmd[1] = (1 << 4) | 1;
582 558
@@ -603,7 +579,6 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
603 if (ret) 579 if (ret)
604 printk(KERN_INFO "cdrom: bgformat failed\n"); 580 printk(KERN_INFO "cdrom: bgformat failed\n");
605 581
606 kfree(buffer);
607 return ret; 582 return ret;
608} 583}
609 584
@@ -663,17 +638,16 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
663{ 638{
664 struct packet_command cgc; 639 struct packet_command cgc;
665 struct mode_page_header *mph; 640 struct mode_page_header *mph;
666 char *buffer; 641 char buffer[16];
667 int ret, offset, size; 642 int ret, offset, size;
668 643
669 buffer = kmalloc(16, GFP_KERNEL); 644 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
670 if (!buffer)
671 return -ENOMEM;
672 645
673 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); 646 cgc.buffer = buffer;
647 cgc.buflen = sizeof(buffer);
674 648
675 if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0))) 649 if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0)))
676 goto err; 650 return ret;
677 651
678 mph = (struct mode_page_header *) buffer; 652 mph = (struct mode_page_header *) buffer;
679 offset = be16_to_cpu(mph->desc_length); 653 offset = be16_to_cpu(mph->desc_length);
@@ -683,70 +657,55 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
683 cgc.buflen = size; 657 cgc.buflen = size;
684 658
685 if ((ret = cdrom_mode_select(cdi, &cgc))) 659 if ((ret = cdrom_mode_select(cdi, &cgc)))
686 goto err; 660 return ret;
687 661
688 printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]); 662 printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]);
689 ret = 0; 663 return 0;
690err:
691 kfree(buffer);
692 return ret;
693} 664}
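
cdrom_mrw_set_lba_space() is a mode-sense/mode-select round trip: read the current page, edit it in place, trim cgc.buflen to the real page size taken from the header, and write it back. The skeleton of that round trip, with the in-place edit elided as it is in the hunk (mrw_page_roundtrip() is illustrative):

	static int mrw_page_roundtrip(struct cdrom_device_info *cdi, int space)
	{
		struct packet_command cgc;
		char buffer[16];
		int ret;

		init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
		if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0)))
			return ret;

		/* ... patch the page bytes and set cgc.buflen from the header ... */

		if ((ret = cdrom_mode_select(cdi, &cgc)))
			return ret;

		return 0;
	}
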
694 665
695static int cdrom_get_random_writable(struct cdrom_device_info *cdi, 666static int cdrom_get_random_writable(struct cdrom_device_info *cdi,
696 struct rwrt_feature_desc *rfd) 667 struct rwrt_feature_desc *rfd)
697{ 668{
698 struct packet_command cgc; 669 struct packet_command cgc;
699 char *buffer; 670 char buffer[24];
700 int ret; 671 int ret;
701 672
702 buffer = kmalloc(24, GFP_KERNEL); 673 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
703 if (!buffer)
704 return -ENOMEM;
705
706 init_cdrom_command(&cgc, buffer, 24, CGC_DATA_READ);
707 674
708 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* 0x46 */ 675 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* 0x46 */
709 cgc.cmd[3] = CDF_RWRT; /* 0x0020 */ 676 cgc.cmd[3] = CDF_RWRT; /* 0x0020 */
710 cgc.cmd[8] = 24; /* 0x18 */ 677 cgc.cmd[8] = sizeof(buffer); /* 0x18 = 24 */
711 cgc.quiet = 1; 678 cgc.quiet = 1;
712 679
713 if ((ret = cdi->ops->generic_packet(cdi, &cgc))) 680 if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
714 goto err; 681 return ret;
715 682
716 memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd)); 683 memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd));
717 ret = 0; 684 return 0;
718err:
719 kfree(buffer);
720 return ret;
721} 685}
722 686
723static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi) 687static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi)
724{ 688{
725 struct packet_command cgc; 689 struct packet_command cgc;
726 char *buffer; 690 char buffer[16];
727 __be16 *feature_code; 691 __be16 *feature_code;
728 int ret; 692 int ret;
729 693
730 buffer = kmalloc(16, GFP_KERNEL); 694 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
731 if (!buffer)
732 return -ENOMEM;
733
734 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
735 695
736 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; 696 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
737 cgc.cmd[3] = CDF_HWDM; 697 cgc.cmd[3] = CDF_HWDM;
738 cgc.cmd[8] = 16; 698 cgc.cmd[8] = sizeof(buffer);
739 cgc.quiet = 1; 699 cgc.quiet = 1;
740 700
741 if ((ret = cdi->ops->generic_packet(cdi, &cgc))) 701 if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
742 goto err; 702 return ret;
743 703
744 feature_code = (__be16 *) &buffer[sizeof(struct feature_header)]; 704 feature_code = (__be16 *) &buffer[sizeof(struct feature_header)];
745 if (be16_to_cpu(*feature_code) == CDF_HWDM) 705 if (be16_to_cpu(*feature_code) == CDF_HWDM)
746 ret = 0; 706 return 0;
747err: 707
748 kfree(buffer); 708 return 1;
749 return ret;
750} 709}
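
Note the tri-state convention these probes share: a negative value means the command itself failed, 0 means the drive reported the feature, and 1 means the drive answered without it. Callers therefore have to separate "failed" from "absent", for example (illustrative caller, not code from this patch):

	ret = cdrom_has_defect_mgt(cdi);
	if (ret < 0)
		return ret;	/* the command itself failed */
	if (ret)
		return 0;	/* drive answered, feature absent */
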
751 710
752 711
@@ -837,14 +796,10 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
837static int mo_open_write(struct cdrom_device_info *cdi) 796static int mo_open_write(struct cdrom_device_info *cdi)
838{ 797{
839 struct packet_command cgc; 798 struct packet_command cgc;
840 char *buffer; 799 char buffer[255];
841 int ret; 800 int ret;
842 801
843 buffer = kmalloc(255, GFP_KERNEL); 802 init_cdrom_command(&cgc, buffer, 4, CGC_DATA_READ);
844 if (!buffer)
845 return -ENOMEM;
846
847 init_cdrom_command(&cgc, buffer, 4, CGC_DATA_READ);
848 cgc.quiet = 1; 803 cgc.quiet = 1;
849 804
850 /* 805 /*
@@ -861,15 +816,10 @@ static int mo_open_write(struct cdrom_device_info *cdi)
861 } 816 }
862 817
863 /* drive gave us no info, let the user go ahead */ 818 /* drive gave us no info, let the user go ahead */
864 if (ret) { 819 if (ret)
865 ret = 0; 820 return 0;
866 goto err;
867 }
868 821
869 ret = buffer[3] & 0x80; 822 return buffer[3] & 0x80;
870err:
871 kfree(buffer);
872 return ret;
873} 823}
874 824
875static int cdrom_ram_open_write(struct cdrom_device_info *cdi) 825static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
@@ -892,19 +842,15 @@ static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
892static void cdrom_mmc3_profile(struct cdrom_device_info *cdi) 842static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
893{ 843{
894 struct packet_command cgc; 844 struct packet_command cgc;
895 char *buffer; 845 char buffer[32];
896 int ret, mmc3_profile; 846 int ret, mmc3_profile;
897 847
898 buffer = kmalloc(32, GFP_KERNEL); 848 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
899 if (!buffer)
900 return;
901
902 init_cdrom_command(&cgc, buffer, 32, CGC_DATA_READ);
903 849
904 cgc.cmd[0] = GPCMD_GET_CONFIGURATION; 850 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
905 cgc.cmd[1] = 0; 851 cgc.cmd[1] = 0;
906 cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */ 852 cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */
907 cgc.cmd[8] = 32; /* Allocation Length */ 853 cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
908 cgc.quiet = 1; 854 cgc.quiet = 1;
909 855
910 if ((ret = cdi->ops->generic_packet(cdi, &cgc))) 856 if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
@@ -913,7 +859,6 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
913 mmc3_profile = (buffer[6] << 8) | buffer[7]; 859 mmc3_profile = (buffer[6] << 8) | buffer[7];
914 860
915 cdi->mmc3_profile = mmc3_profile; 861 cdi->mmc3_profile = mmc3_profile;
916 kfree(buffer);
917} 862}
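
The profile word read at the end of cdrom_mmc3_profile() is an open-coded big-endian 16-bit load of the current-profile field; with the unaligned helpers available in kernels of this era, it could equally be written as follows (equivalent, not part of this patch):

	#include <asm/unaligned.h>

	cdi->mmc3_profile = get_unaligned_be16(&buffer[6]);
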
918 863
919static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi) 864static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi)
@@ -1628,15 +1573,12 @@ static void setup_send_key(struct packet_command *cgc, unsigned agid, unsigned t
1628static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) 1573static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1629{ 1574{
1630 int ret; 1575 int ret;
1631 u_char *buf; 1576 u_char buf[20];
1632 struct packet_command cgc; 1577 struct packet_command cgc;
1633 struct cdrom_device_ops *cdo = cdi->ops; 1578 struct cdrom_device_ops *cdo = cdi->ops;
1634 rpc_state_t *rpc_state; 1579 rpc_state_t rpc_state;
1635
1636 buf = kzalloc(20, GFP_KERNEL);
1637 if (!buf)
1638 return -ENOMEM;
1639 1580
1581 memset(buf, 0, sizeof(buf));
1640 init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ); 1582 init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ);
1641 1583
1642 switch (ai->type) { 1584 switch (ai->type) {
@@ -1647,7 +1589,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1647 setup_report_key(&cgc, ai->lsa.agid, 0); 1589 setup_report_key(&cgc, ai->lsa.agid, 0);
1648 1590
1649 if ((ret = cdo->generic_packet(cdi, &cgc))) 1591 if ((ret = cdo->generic_packet(cdi, &cgc)))
1650 goto err; 1592 return ret;
1651 1593
1652 ai->lsa.agid = buf[7] >> 6; 1594 ai->lsa.agid = buf[7] >> 6;
1653 /* Returning data, let host change state */ 1595 /* Returning data, let host change state */
@@ -1658,7 +1600,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1658 setup_report_key(&cgc, ai->lsk.agid, 2); 1600 setup_report_key(&cgc, ai->lsk.agid, 2);
1659 1601
1660 if ((ret = cdo->generic_packet(cdi, &cgc))) 1602 if ((ret = cdo->generic_packet(cdi, &cgc)))
1661 goto err; 1603 return ret;
1662 1604
1663 copy_key(ai->lsk.key, &buf[4]); 1605 copy_key(ai->lsk.key, &buf[4]);
1664 /* Returning data, let host change state */ 1606 /* Returning data, let host change state */
@@ -1669,7 +1611,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1669 setup_report_key(&cgc, ai->lsc.agid, 1); 1611 setup_report_key(&cgc, ai->lsc.agid, 1);
1670 1612
1671 if ((ret = cdo->generic_packet(cdi, &cgc))) 1613 if ((ret = cdo->generic_packet(cdi, &cgc)))
1672 goto err; 1614 return ret;
1673 1615
1674 copy_chal(ai->lsc.chal, &buf[4]); 1616 copy_chal(ai->lsc.chal, &buf[4]);
1675 /* Returning data, let host change state */ 1617 /* Returning data, let host change state */
@@ -1686,7 +1628,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1686 cgc.cmd[2] = ai->lstk.lba >> 24; 1628 cgc.cmd[2] = ai->lstk.lba >> 24;
1687 1629
1688 if ((ret = cdo->generic_packet(cdi, &cgc))) 1630 if ((ret = cdo->generic_packet(cdi, &cgc)))
1689 goto err; 1631 return ret;
1690 1632
1691 ai->lstk.cpm = (buf[4] >> 7) & 1; 1633 ai->lstk.cpm = (buf[4] >> 7) & 1;
1692 ai->lstk.cp_sec = (buf[4] >> 6) & 1; 1634 ai->lstk.cp_sec = (buf[4] >> 6) & 1;
@@ -1700,7 +1642,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1700 setup_report_key(&cgc, ai->lsasf.agid, 5); 1642 setup_report_key(&cgc, ai->lsasf.agid, 5);
1701 1643
1702 if ((ret = cdo->generic_packet(cdi, &cgc))) 1644 if ((ret = cdo->generic_packet(cdi, &cgc)))
1703 goto err; 1645 return ret;
1704 1646
1705 ai->lsasf.asf = buf[7] & 1; 1647 ai->lsasf.asf = buf[7] & 1;
1706 break; 1648 break;
@@ -1713,7 +1655,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1713 copy_chal(&buf[4], ai->hsc.chal); 1655 copy_chal(&buf[4], ai->hsc.chal);
1714 1656
1715 if ((ret = cdo->generic_packet(cdi, &cgc))) 1657 if ((ret = cdo->generic_packet(cdi, &cgc)))
1716 goto err; 1658 return ret;
1717 1659
1718 ai->type = DVD_LU_SEND_KEY1; 1660 ai->type = DVD_LU_SEND_KEY1;
1719 break; 1661 break;
@@ -1726,7 +1668,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1726 1668
1727 if ((ret = cdo->generic_packet(cdi, &cgc))) { 1669 if ((ret = cdo->generic_packet(cdi, &cgc))) {
1728 ai->type = DVD_AUTH_FAILURE; 1670 ai->type = DVD_AUTH_FAILURE;
1729 goto err; 1671 return ret;
1730 } 1672 }
1731 ai->type = DVD_AUTH_ESTABLISHED; 1673 ai->type = DVD_AUTH_ESTABLISHED;
1732 break; 1674 break;
@@ -1737,23 +1679,24 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1737 cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n"); 1679 cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n");
1738 setup_report_key(&cgc, ai->lsa.agid, 0x3f); 1680 setup_report_key(&cgc, ai->lsa.agid, 0x3f);
1739 if ((ret = cdo->generic_packet(cdi, &cgc))) 1681 if ((ret = cdo->generic_packet(cdi, &cgc)))
1740 goto err; 1682 return ret;
1741 break; 1683 break;
1742 1684
1743 /* Get region settings */ 1685 /* Get region settings */
1744 case DVD_LU_SEND_RPC_STATE: 1686 case DVD_LU_SEND_RPC_STATE:
1745 cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n"); 1687 cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n");
1746 setup_report_key(&cgc, 0, 8); 1688 setup_report_key(&cgc, 0, 8);
1689 memset(&rpc_state, 0, sizeof(rpc_state_t));
1690 cgc.buffer = (char *) &rpc_state;
1747 1691
1748 if ((ret = cdo->generic_packet(cdi, &cgc))) 1692 if ((ret = cdo->generic_packet(cdi, &cgc)))
1749 goto err; 1693 return ret;
1750 1694
1751 rpc_state = (rpc_state_t *)buf; 1695 ai->lrpcs.type = rpc_state.type_code;
1752 ai->lrpcs.type = rpc_state->type_code; 1696 ai->lrpcs.vra = rpc_state.vra;
1753 ai->lrpcs.vra = rpc_state->vra; 1697 ai->lrpcs.ucca = rpc_state.ucca;
1754 ai->lrpcs.ucca = rpc_state->ucca; 1698 ai->lrpcs.region_mask = rpc_state.region_mask;
1755 ai->lrpcs.region_mask = rpc_state->region_mask; 1699 ai->lrpcs.rpc_scheme = rpc_state.rpc_scheme;
1756 ai->lrpcs.rpc_scheme = rpc_state->rpc_scheme;
1757 break; 1700 break;
1758 1701
1759 /* Set region settings */ 1702 /* Set region settings */
@@ -1764,23 +1707,20 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1764 buf[4] = ai->hrpcs.pdrc; 1707 buf[4] = ai->hrpcs.pdrc;
1765 1708
1766 if ((ret = cdo->generic_packet(cdi, &cgc))) 1709 if ((ret = cdo->generic_packet(cdi, &cgc)))
1767 goto err; 1710 return ret;
1768 break; 1711 break;
1769 1712
1770 default: 1713 default:
1771 cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type); 1714 cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type);
1772 ret = -ENOTTY; 1715 return -ENOTTY;
1773 goto err;
1774 } 1716 }
1775 ret = 0; 1717
1776err: 1718 return 0;
1777 kfree(buf);
1778 return ret;
1779} 1719}
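
The DVD_LU_SEND_RPC_STATE hunk fixes more than the allocation: the old code aimed a rpc_state_t pointer into buf and read fields through the cast, while the new code points cgc.buffer at a properly typed rpc_state_t on the stack, so the reply lands in the struct itself and its members are read directly. The shape of that change in isolation:

	rpc_state_t rpc_state;

	memset(&rpc_state, 0, sizeof(rpc_state_t));
	cgc.buffer = (char *)&rpc_state;	/* reply lands in the struct, not in buf */

	if ((ret = cdo->generic_packet(cdi, &cgc)))
		return ret;

	ai->lrpcs.type = rpc_state.type_code;	/* direct member access, no cast */
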
1780 1720
1781static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s) 1721static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1782{ 1722{
1783 unsigned char *buf, *base; 1723 unsigned char buf[21], *base;
1784 struct dvd_layer *layer; 1724 struct dvd_layer *layer;
1785 struct packet_command cgc; 1725 struct packet_command cgc;
1786 struct cdrom_device_ops *cdo = cdi->ops; 1726 struct cdrom_device_ops *cdo = cdi->ops;
@@ -1789,11 +1729,7 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1789 if (layer_num >= DVD_LAYERS) 1729 if (layer_num >= DVD_LAYERS)
1790 return -EINVAL; 1730 return -EINVAL;
1791 1731
1792 buf = kmalloc(21, GFP_KERNEL); 1732 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1793 if (!buf)
1794 return -ENOMEM;
1795
1796 init_cdrom_command(&cgc, buf, 21, CGC_DATA_READ);
1797 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1733 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1798 cgc.cmd[6] = layer_num; 1734 cgc.cmd[6] = layer_num;
1799 cgc.cmd[7] = s->type; 1735 cgc.cmd[7] = s->type;
@@ -1805,7 +1741,7 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1805 cgc.quiet = 1; 1741 cgc.quiet = 1;
1806 1742
1807 if ((ret = cdo->generic_packet(cdi, &cgc))) 1743 if ((ret = cdo->generic_packet(cdi, &cgc)))
1808 goto err; 1744 return ret;
1809 1745
1810 base = &buf[4]; 1746 base = &buf[4];
1811 layer = &s->physical.layer[layer_num]; 1747 layer = &s->physical.layer[layer_num];
@@ -1829,24 +1765,17 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1829 layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15]; 1765 layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15];
1830 layer->bca = base[16] >> 7; 1766 layer->bca = base[16] >> 7;
1831 1767
1832 ret = 0; 1768 return 0;
1833err:
1834 kfree(buf);
1835 return ret;
1836} 1769}
1837 1770
1838static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s) 1771static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
1839{ 1772{
1840 int ret; 1773 int ret;
1841 u_char *buf; 1774 u_char buf[8];
1842 struct packet_command cgc; 1775 struct packet_command cgc;
1843 struct cdrom_device_ops *cdo = cdi->ops; 1776 struct cdrom_device_ops *cdo = cdi->ops;
1844 1777
1845 buf = kmalloc(8, GFP_KERNEL); 1778 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1846 if (!buf)
1847 return -ENOMEM;
1848
1849 init_cdrom_command(&cgc, buf, 8, CGC_DATA_READ);
1850 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1779 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1851 cgc.cmd[6] = s->copyright.layer_num; 1780 cgc.cmd[6] = s->copyright.layer_num;
1852 cgc.cmd[7] = s->type; 1781 cgc.cmd[7] = s->type;
@@ -1854,15 +1783,12 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
1854 cgc.cmd[9] = cgc.buflen & 0xff; 1783 cgc.cmd[9] = cgc.buflen & 0xff;
1855 1784
1856 if ((ret = cdo->generic_packet(cdi, &cgc))) 1785 if ((ret = cdo->generic_packet(cdi, &cgc)))
1857 goto err; 1786 return ret;
1858 1787
1859 s->copyright.cpst = buf[4]; 1788 s->copyright.cpst = buf[4];
1860 s->copyright.rmi = buf[5]; 1789 s->copyright.rmi = buf[5];
1861 1790
1862 ret = 0; 1791 return 0;
1863err:
1864 kfree(buf);
1865 return ret;
1866} 1792}
1867 1793
1868static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s) 1794static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
@@ -1894,33 +1820,26 @@ static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
1894static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s) 1820static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s)
1895{ 1821{
1896 int ret; 1822 int ret;
1897 u_char *buf; 1823 u_char buf[4 + 188];
1898 struct packet_command cgc; 1824 struct packet_command cgc;
1899 struct cdrom_device_ops *cdo = cdi->ops; 1825 struct cdrom_device_ops *cdo = cdi->ops;
1900 1826
1901 buf = kmalloc(4 + 188, GFP_KERNEL); 1827 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1902 if (!buf)
1903 return -ENOMEM;
1904
1905 init_cdrom_command(&cgc, buf, 4 + 188, CGC_DATA_READ);
1906 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1828 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1907 cgc.cmd[7] = s->type; 1829 cgc.cmd[7] = s->type;
1908 cgc.cmd[9] = cgc.buflen & 0xff; 1830 cgc.cmd[9] = cgc.buflen & 0xff;
1909 1831
1910 if ((ret = cdo->generic_packet(cdi, &cgc))) 1832 if ((ret = cdo->generic_packet(cdi, &cgc)))
1911 goto err; 1833 return ret;
1912 1834
1913 s->bca.len = buf[0] << 8 | buf[1]; 1835 s->bca.len = buf[0] << 8 | buf[1];
1914 if (s->bca.len < 12 || s->bca.len > 188) { 1836 if (s->bca.len < 12 || s->bca.len > 188) {
1915 cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len); 1837 cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
1916 ret = -EIO; 1838 return -EIO;
1917 goto err;
1918 } 1839 }
1919 memcpy(s->bca.value, &buf[4], s->bca.len); 1840 memcpy(s->bca.value, &buf[4], s->bca.len);
1920 ret = 0; 1841
1921err: 1842 return 0;
1922 kfree(buf);
1923 return ret;
1924} 1843}
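
dvd_read_bca() keeps the one check that matters most when a length comes from the device: s->bca.len is range-checked against the 12..188 bytes the BCA format allows before the memcpy() that would otherwise overrun s->bca.value. The guard, reduced to its essentials:

	s->bca.len = buf[0] << 8 | buf[1];	/* big-endian length from the drive */
	if (s->bca.len < 12 || s->bca.len > 188)
		return -EIO;			/* never copy a device-chosen length unchecked */
	memcpy(s->bca.value, &buf[4], s->bca.len);
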
1925 1844
1926static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s) 1845static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
@@ -2020,13 +1939,9 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
2020{ 1939{
2021 struct cdrom_device_ops *cdo = cdi->ops; 1940 struct cdrom_device_ops *cdo = cdi->ops;
2022 struct packet_command cgc; 1941 struct packet_command cgc;
2023 char *buffer; 1942 char buffer[32];
2024 int ret; 1943 int ret;
2025 1944
2026 buffer = kmalloc(32, GFP_KERNEL);
2027 if (!buffer)
2028 return -ENOMEM;
2029
2030 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); 1945 init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
2031 cgc.cmd[0] = GPCMD_READ_SUBCHANNEL; 1946 cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
2032 cgc.cmd[1] = 2; /* MSF addressing */ 1947 cgc.cmd[1] = 2; /* MSF addressing */
@@ -2035,7 +1950,7 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
2035 cgc.cmd[8] = 16; 1950 cgc.cmd[8] = 16;
2036 1951
2037 if ((ret = cdo->generic_packet(cdi, &cgc))) 1952 if ((ret = cdo->generic_packet(cdi, &cgc)))
2038 goto err; 1953 return ret;
2039 1954
2040 subchnl->cdsc_audiostatus = cgc.buffer[1]; 1955 subchnl->cdsc_audiostatus = cgc.buffer[1];
2041 subchnl->cdsc_format = CDROM_MSF; 1956 subchnl->cdsc_format = CDROM_MSF;
@@ -2050,10 +1965,7 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
2050 subchnl->cdsc_absaddr.msf.second = cgc.buffer[10]; 1965 subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
2051 subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11]; 1966 subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
2052 1967
2053 ret = 0; 1968 return 0;
2054err:
2055 kfree(buffer);
2056 return ret;
2057} 1969}
2058 1970
2059/* 1971/*
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 4c1c584e9eb6..81630a68475c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -101,7 +101,6 @@ obj-$(CONFIG_TELCLOCK) += tlclk.o
101 101
102obj-$(CONFIG_MWAVE) += mwave/ 102obj-$(CONFIG_MWAVE) += mwave/
103obj-$(CONFIG_AGP) += agp/ 103obj-$(CONFIG_AGP) += agp/
104obj-$(CONFIG_DRM) += drm/
105obj-$(CONFIG_PCMCIA) += pcmcia/ 104obj-$(CONFIG_PCMCIA) += pcmcia/
106obj-$(CONFIG_IPMI_HANDLER) += ipmi/ 105obj-$(CONFIG_IPMI_HANDLER) += ipmi/
107 106
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 13665db363d6..481ffe87c716 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -16,28 +16,9 @@
16#include <asm/page.h> /* PAGE_SIZE */ 16#include <asm/page.h> /* PAGE_SIZE */
17#include <asm/e820.h> 17#include <asm/e820.h>
18#include <asm/k8.h> 18#include <asm/k8.h>
19#include <asm/gart.h>
19#include "agp.h" 20#include "agp.h"
20 21
21/* PTE bits. */
22#define GPTE_VALID 1
23#define GPTE_COHERENT 2
24
25/* Aperture control register bits. */
26#define GARTEN (1<<0)
27#define DISGARTCPU (1<<4)
28#define DISGARTIO (1<<5)
29
30/* GART cache control register bits. */
31#define INVGART (1<<0)
32#define GARTPTEERR (1<<1)
33
34/* K8 On-cpu GART registers */
35#define AMD64_GARTAPERTURECTL 0x90
36#define AMD64_GARTAPERTUREBASE 0x94
37#define AMD64_GARTTABLEBASE 0x98
38#define AMD64_GARTCACHECTL 0x9c
39#define AMD64_GARTEN (1<<0)
40
41/* NVIDIA K8 registers */ 22/* NVIDIA K8 registers */
42#define NVIDIA_X86_64_0_APBASE 0x10 23#define NVIDIA_X86_64_0_APBASE 0x10
43#define NVIDIA_X86_64_1_APBASE1 0x50 24#define NVIDIA_X86_64_1_APBASE1 0x50
@@ -165,29 +146,18 @@ static int amd64_fetch_size(void)
165 * In a multiprocessor x86-64 system, this function gets 146 * In a multiprocessor x86-64 system, this function gets
166 * called once for each CPU. 147 * called once for each CPU.
167 */ 148 */
168static u64 amd64_configure (struct pci_dev *hammer, u64 gatt_table) 149static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
169{ 150{
170 u64 aperturebase; 151 u64 aperturebase;
171 u32 tmp; 152 u32 tmp;
172 u64 addr, aper_base; 153 u64 aper_base;
173 154
174 /* Address to map to */ 155 /* Address to map to */
175 pci_read_config_dword (hammer, AMD64_GARTAPERTUREBASE, &tmp); 156 pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
176 aperturebase = tmp << 25; 157 aperturebase = tmp << 25;
177 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); 158 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
178 159
179 /* address of the mappings table */ 160 enable_gart_translation(hammer, gatt_table);
180 addr = (u64) gatt_table;
181 addr >>= 12;
182 tmp = (u32) addr<<4;
183 tmp &= ~0xf;
184 pci_write_config_dword (hammer, AMD64_GARTTABLEBASE, tmp);
185
186 /* Enable GART translation for this hammer. */
187 pci_read_config_dword(hammer, AMD64_GARTAPERTURECTL, &tmp);
188 tmp |= GARTEN;
189 tmp &= ~(DISGARTCPU | DISGARTIO);
190 pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp);
191 161
192 return aper_base; 162 return aper_base;
193} 163}
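
amd64_configure() now delegates table-base programming and the enable sequence to enable_gart_translation(), which lives alongside the AMD64_GART* register definitions that moved to <asm/gart.h>. The deleted lines show what the shared helper has to do; a reconstruction from them (a sketch of the expected behaviour, not the header's code):

	static void enable_gart_translation_sketch(struct pci_dev *dev, u64 gatt_table)
	{
		u32 tmp;

		/* program the mappings-table base: phys addr bits 12+ shifted into the field */
		tmp = ((u32)(gatt_table >> 12) << 4) & ~0xf;
		pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);

		/* enable translation; keep CPU and IO accesses going through the GART */
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
		tmp |= GARTEN;
		tmp &= ~(DISGARTCPU | DISGARTIO);
		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
	}
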
@@ -226,9 +196,9 @@ static void amd64_cleanup(void)
226 for (i = 0; i < num_k8_northbridges; i++) { 196 for (i = 0; i < num_k8_northbridges; i++) {
227 struct pci_dev *dev = k8_northbridges[i]; 197 struct pci_dev *dev = k8_northbridges[i];
228 /* disable gart translation */ 198 /* disable gart translation */
229 pci_read_config_dword (dev, AMD64_GARTAPERTURECTL, &tmp); 199 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
230 tmp &= ~AMD64_GARTEN; 200 tmp &= ~AMD64_GARTEN;
231 pci_write_config_dword (dev, AMD64_GARTAPERTURECTL, tmp); 201 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
232 } 202 }
233} 203}
234 204
@@ -258,24 +228,10 @@ static const struct agp_bridge_driver amd_8151_driver = {
258}; 228};
259 229
260/* Some basic sanity checks for the aperture. */ 230/* Some basic sanity checks for the aperture. */
261static int __devinit aperture_valid(u64 aper, u32 size) 231static int __devinit agp_aperture_valid(u64 aper, u32 size)
262{ 232{
263 if (aper == 0) { 233 if (!aperture_valid(aper, size, 32*1024*1024))
264 printk(KERN_ERR PFX "No aperture\n");
265 return 0;
266 }
267 if (size < 32*1024*1024) {
268 printk(KERN_ERR PFX "Aperture too small (%d MB)\n", size>>20);
269 return 0;
270 }
271 if ((u64)aper + size > 0x100000000ULL) {
272 printk(KERN_ERR PFX "Aperture out of bounds\n");
273 return 0; 234 return 0;
274 }
275 if (e820_any_mapped(aper, aper + size, E820_RAM)) {
276 printk(KERN_ERR PFX "Aperture pointing to RAM\n");
277 return 0;
278 }
279 235
280 /* Request the Aperture. This catches cases when someone else 236 /* Request the Aperture. This catches cases when someone else
281 already put a mapping in there - happens with some very broken BIOS 237 already put a mapping in there - happens with some very broken BIOS
@@ -308,11 +264,11 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
308 u32 nb_order, nb_base; 264 u32 nb_order, nb_base;
309 u16 apsize; 265 u16 apsize;
310 266
311 pci_read_config_dword(nb, 0x90, &nb_order); 267 pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
312 nb_order = (nb_order >> 1) & 7; 268 nb_order = (nb_order >> 1) & 7;
313 pci_read_config_dword(nb, 0x94, &nb_base); 269 pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
314 nb_aper = nb_base << 25; 270 nb_aper = nb_base << 25;
315 if (aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) { 271 if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) {
316 return 0; 272 return 0;
317 } 273 }
318 274
@@ -331,12 +287,23 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
331 pci_read_config_dword(agp, 0x10, &aper_low); 287 pci_read_config_dword(agp, 0x10, &aper_low);
332 pci_read_config_dword(agp, 0x14, &aper_hi); 288 pci_read_config_dword(agp, 0x14, &aper_hi);
333 aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32); 289 aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
290
291 /*
292 * On some sick chips APSIZE is 0, which means the chip wants 4G,
293 * so double-check that order and trust the AMD NB settings instead.
294 */
295 if (order >= 0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
296 printk(KERN_INFO PFX "Aperture size %u MB is not right, using settings from NB\n",
297 32 << order);
298 order = nb_order;
299 }
300
334 printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order); 301 printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order);
335 if (order < 0 || !aperture_valid(aper, (32*1024*1024)<<order)) 302 if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
336 return -1; 303 return -1;
337 304
338 pci_write_config_dword(nb, 0x90, order << 1); 305 pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1);
339 pci_write_config_dword(nb, 0x94, aper >> 25); 306 pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
340 307
341 return 0; 308 return 0;
342} 309}
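
Both call sites now validate through agp_aperture_valid(), a thin wrapper that defers to a generic aperture_valid() with a 32MB floor. The open-coded checks deleted above are exactly what the shared helper is expected to enforce; reconstructed from the removed lines (illustrative, the shared helper lives in common code):

	static int aperture_valid_sketch(u64 aper, u32 size, u32 min_size)
	{
		if (aper == 0)
			return 0;	/* no aperture programmed */
		if (size < min_size)
			return 0;	/* aperture too small to be useful */
		if (aper + size > 0x100000000ULL)
			return 0;	/* must sit entirely below 4G */
		if (e820_any_mapped(aper, aper + size, E820_RAM))
			return 0;	/* must not overlap RAM */
		return 1;
	}
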
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
deleted file mode 100644
index 1283ded88ead..000000000000
--- a/drivers/char/drm/Makefile
+++ /dev/null
@@ -1,40 +0,0 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
6 drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
7 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
8 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
9 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
10
11tdfx-objs := tdfx_drv.o
12r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
13mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
14i810-objs := i810_drv.o i810_dma.o
15i830-objs := i830_drv.o i830_dma.o i830_irq.o
16i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
17radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
18sis-objs := sis_drv.o sis_mm.o
19savage-objs := savage_drv.o savage_bci.o savage_state.o
20via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
21
22ifeq ($(CONFIG_COMPAT),y)
23drm-objs += drm_ioc32.o
24radeon-objs += radeon_ioc32.o
25mga-objs += mga_ioc32.o
26r128-objs += r128_ioc32.o
27i915-objs += i915_ioc32.o
28endif
29
30obj-$(CONFIG_DRM) += drm.o
31obj-$(CONFIG_DRM_TDFX) += tdfx.o
32obj-$(CONFIG_DRM_R128) += r128.o
33obj-$(CONFIG_DRM_RADEON)+= radeon.o
34obj-$(CONFIG_DRM_MGA) += mga.o
35obj-$(CONFIG_DRM_I810) += i810.o
36obj-$(CONFIG_DRM_I830) += i830.o
37obj-$(CONFIG_DRM_I915) += i915.o
38obj-$(CONFIG_DRM_SIS) += sis.o
39obj-$(CONFIG_DRM_SAVAGE)+= savage.o
40obj-$(CONFIG_DRM_VIA) += via.o
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
deleted file mode 100644
index 38d3c6b8276a..000000000000
--- a/drivers/char/drm/drm.h
+++ /dev/null
@@ -1,694 +0,0 @@
1/**
2 * \file drm.h
3 * Header for the Direct Rendering Manager
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 *
7 * \par Acknowledgments:
8 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
9 */
10
11/*
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All rights reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#ifndef _DRM_H_
37#define _DRM_H_
38
39#if defined(__linux__)
40#if defined(__KERNEL__)
41#endif
42#include <asm/ioctl.h> /* For _IO* macros */
43#define DRM_IOCTL_NR(n) _IOC_NR(n)
44#define DRM_IOC_VOID _IOC_NONE
45#define DRM_IOC_READ _IOC_READ
46#define DRM_IOC_WRITE _IOC_WRITE
47#define DRM_IOC_READWRITE (_IOC_READ|_IOC_WRITE)
48#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
49#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
50#if defined(__FreeBSD__) && defined(IN_MODULE)
51/* Prevent name collision when including sys/ioccom.h */
52#undef ioctl
53#include <sys/ioccom.h>
54#define ioctl(a,b,c) xf86ioctl(a,b,c)
55#else
56#include <sys/ioccom.h>
57#endif /* __FreeBSD__ && xf86ioctl */
58#define DRM_IOCTL_NR(n) ((n) & 0xff)
59#define DRM_IOC_VOID IOC_VOID
60#define DRM_IOC_READ IOC_OUT
61#define DRM_IOC_WRITE IOC_IN
62#define DRM_IOC_READWRITE IOC_INOUT
63#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
64#endif
65
66#define DRM_MAJOR 226
67#define DRM_MAX_MINOR 15
68
69#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
70#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
71#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
72#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
73
74#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
75#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
76#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
77#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
78#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
79
80typedef unsigned int drm_handle_t;
81typedef unsigned int drm_context_t;
82typedef unsigned int drm_drawable_t;
83typedef unsigned int drm_magic_t;
84
85/**
86 * Cliprect.
87 *
88 * \warning: If you change this structure, make sure you change
89 * XF86DRIClipRectRec in the server as well
90 *
91 * \note KW: Actually it's illegal to change either for
92 * backwards-compatibility reasons.
93 */
94struct drm_clip_rect {
95 unsigned short x1;
96 unsigned short y1;
97 unsigned short x2;
98 unsigned short y2;
99};
100
101/**
102 * Drawable information.
103 */
104struct drm_drawable_info {
105 unsigned int num_rects;
106 struct drm_clip_rect *rects;
107};
108
109/**
110 * Texture region.
111 */
112struct drm_tex_region {
113 unsigned char next;
114 unsigned char prev;
115 unsigned char in_use;
116 unsigned char padding;
117 unsigned int age;
118};
119
120/**
121 * Hardware lock.
122 *
123 * The lock structure is a simple cache-line aligned integer. To avoid
124 * processor bus contention on a multiprocessor system, there should not be any
125 * other data stored in the same cache line.
126 */
127struct drm_hw_lock {
128 __volatile__ unsigned int lock; /**< lock variable */
129 char padding[60]; /**< Pad to cache line */
130};
131
132/**
133 * DRM_IOCTL_VERSION ioctl argument type.
134 *
135 * \sa drmGetVersion().
136 */
137struct drm_version {
138 int version_major; /**< Major version */
139 int version_minor; /**< Minor version */
140 int version_patchlevel; /**< Patch level */
141 size_t name_len; /**< Length of name buffer */
142 char __user *name; /**< Name of driver */
143 size_t date_len; /**< Length of date buffer */
144 char __user *date; /**< User-space buffer to hold date */
145 size_t desc_len; /**< Length of desc buffer */
146 char __user *desc; /**< User-space buffer to hold desc */
147};
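
drm_version is a two-pass ioctl: user space calls it once with NULL buffers to learn the three lengths, allocates, then calls again to have the strings filled in; this is how libdrm's drmGetVersion() uses it. A sketch, assuming fd is an open DRM device node and get_drm_version() is a hypothetical helper:

	static int get_drm_version(int fd, struct drm_version *v)
	{
		memset(v, 0, sizeof(*v));
		if (ioctl(fd, DRM_IOCTL_VERSION, v))	/* first pass: lengths only */
			return -1;
		v->name = malloc(v->name_len + 1);
		v->date = malloc(v->date_len + 1);
		v->desc = malloc(v->desc_len + 1);
		if (!v->name || !v->date || !v->desc)
			return -1;
		return ioctl(fd, DRM_IOCTL_VERSION, v);	/* second pass: fill the buffers */
	}
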
148
149/**
150 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
151 *
152 * \sa drmGetBusid() and drmSetBusId().
153 */
154struct drm_unique {
155 size_t unique_len; /**< Length of unique */
156 char __user *unique; /**< Unique name for driver instantiation */
157};
158
159struct drm_list {
160 int count; /**< Length of user-space structures */
161 struct drm_version __user *version;
162};
163
164struct drm_block {
165 int unused;
166};
167
168/**
169 * DRM_IOCTL_CONTROL ioctl argument type.
170 *
171 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
172 */
173struct drm_control {
174 enum {
175 DRM_ADD_COMMAND,
176 DRM_RM_COMMAND,
177 DRM_INST_HANDLER,
178 DRM_UNINST_HANDLER
179 } func;
180 int irq;
181};
182
183/**
184 * Type of memory to map.
185 */
186enum drm_map_type {
187 _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
188 _DRM_REGISTERS = 1, /**< no caching, no core dump */
189 _DRM_SHM = 2, /**< shared, cached */
190 _DRM_AGP = 3, /**< AGP/GART */
191 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
192 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
193};
194
195/**
196 * Memory mapping flags.
197 */
198enum drm_map_flags {
199 _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
200 _DRM_READ_ONLY = 0x02,
201 _DRM_LOCKED = 0x04, /**< shared, cached, locked */
202 _DRM_KERNEL = 0x08, /**< kernel requires access */
203 _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
204 _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
205 _DRM_REMOVABLE = 0x40, /**< Removable mapping */
206 _DRM_DRIVER = 0x80 /**< Managed by driver */
207};
208
209struct drm_ctx_priv_map {
210 unsigned int ctx_id; /**< Context requesting private mapping */
211 void *handle; /**< Handle of map */
212};
213
214/**
215 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
216 * argument type.
217 *
218 * \sa drmAddMap().
219 */
220struct drm_map {
221 unsigned long offset; /**< Requested physical address (0 for SAREA)*/
222 unsigned long size; /**< Requested physical size (bytes) */
223 enum drm_map_type type; /**< Type of memory to map */
224 enum drm_map_flags flags; /**< Flags */
225 void *handle; /**< User-space: "Handle" to pass to mmap() */
226 /**< Kernel-space: kernel-virtual address */
227 int mtrr; /**< MTRR slot used */
228 /* Private data */
229};
230
231/**
232 * DRM_IOCTL_GET_CLIENT ioctl argument type.
233 */
234struct drm_client {
235 int idx; /**< Which client desired? */
236 int auth; /**< Is client authenticated? */
237 unsigned long pid; /**< Process ID */
238 unsigned long uid; /**< User ID */
239 unsigned long magic; /**< Magic */
240 unsigned long iocs; /**< Ioctl count */
241};
242
243enum drm_stat_type {
244 _DRM_STAT_LOCK,
245 _DRM_STAT_OPENS,
246 _DRM_STAT_CLOSES,
247 _DRM_STAT_IOCTLS,
248 _DRM_STAT_LOCKS,
249 _DRM_STAT_UNLOCKS,
250 _DRM_STAT_VALUE, /**< Generic value */
251 _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
252 _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
253
254 _DRM_STAT_IRQ, /**< IRQ */
255 _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
256 _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
257 _DRM_STAT_DMA, /**< DMA */
258 _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
259 _DRM_STAT_MISSED /**< Missed DMA opportunity */
260 /* Add to the *END* of the list */
261};
262
263/**
264 * DRM_IOCTL_GET_STATS ioctl argument type.
265 */
266struct drm_stats {
267 unsigned long count;
268 struct {
269 unsigned long value;
270 enum drm_stat_type type;
271 } data[15];
272};
273
274/**
275 * Hardware locking flags.
276 */
277enum drm_lock_flags {
278 _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
279 _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
280 _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
281 _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
282 /* These *HALT* flags aren't supported yet
283 -- they will be used to support the
284 full-screen DGA-like mode. */
285 _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
286 _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
287};
288
289/**
290 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
291 *
292 * \sa drmGetLock() and drmUnlock().
293 */
294struct drm_lock {
295 int context;
296 enum drm_lock_flags flags;
297};
298
299/**
300 * DMA flags
301 *
302 * \warning
303 * These values \e must match xf86drm.h.
304 *
305 * \sa drm_dma.
306 */
307enum drm_dma_flags {
308 /* Flags for DMA buffer dispatch */
309 _DRM_DMA_BLOCK = 0x01, /**<
310 * Block until buffer dispatched.
311 *
312 * \note The buffer may not yet have
313 * been processed by the hardware --
314 * getting a hardware lock with the
315 * hardware quiescent will ensure
316 * that the buffer has been
317 * processed.
318 */
319 _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
320 _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
321
322 /* Flags for DMA buffer request */
323 _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
324 _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
325 _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
326};
327
328/**
329 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
330 *
331 * \sa drmAddBufs().
332 */
333struct drm_buf_desc {
334 int count; /**< Number of buffers of this size */
335 int size; /**< Size in bytes */
336 int low_mark; /**< Low water mark */
337 int high_mark; /**< High water mark */
338 enum {
339 _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
340 _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
341 _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
342 _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
343 _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
344 } flags;
345 unsigned long agp_start; /**<
346 * Start address of where the AGP buffers are
347 * in the AGP aperture
348 */
349};
350
351/**
352 * DRM_IOCTL_INFO_BUFS ioctl argument type.
353 */
354struct drm_buf_info {
355 int count; /**< Entries in list */
356 struct drm_buf_desc __user *list;
357};
358
359/**
360 * DRM_IOCTL_FREE_BUFS ioctl argument type.
361 */
362struct drm_buf_free {
363 int count;
364 int __user *list;
365};
366
367/**
368 * Buffer information
369 *
370 * \sa drm_buf_map.
371 */
372struct drm_buf_pub {
373 int idx; /**< Index into the master buffer list */
374 int total; /**< Buffer size */
375 int used; /**< Amount of buffer in use (for DMA) */
376 void __user *address; /**< Address of buffer */
377};
378
379/**
380 * DRM_IOCTL_MAP_BUFS ioctl argument type.
381 */
382struct drm_buf_map {
383 int count; /**< Length of the buffer list */
384 void __user *virtual; /**< Mmap'd area in user-virtual */
385 struct drm_buf_pub __user *list; /**< Buffer information */
386};
387
388/**
389 * DRM_IOCTL_DMA ioctl argument type.
390 *
391 * Indices here refer to the offset into the buffer list in drm_buf_get.
392 *
393 * \sa drmDMA().
394 */
395struct drm_dma {
396 int context; /**< Context handle */
397 int send_count; /**< Number of buffers to send */
398 int __user *send_indices; /**< List of handles to buffers */
399 int __user *send_sizes; /**< Lengths of data to send */
400 enum drm_dma_flags flags; /**< Flags */
401 int request_count; /**< Number of buffers requested */
402 int request_size; /**< Desired size for buffers */
403 int __user *request_indices; /**< Buffer information */
404 int __user *request_sizes;
405 int granted_count; /**< Number of buffers granted */
406};
407
408enum drm_ctx_flags {
409 _DRM_CONTEXT_PRESERVED = 0x01,
410 _DRM_CONTEXT_2DONLY = 0x02
411};
412
413/**
414 * DRM_IOCTL_ADD_CTX ioctl argument type.
415 *
416 * \sa drmCreateContext() and drmDestroyContext().
417 */
418struct drm_ctx {
419 drm_context_t handle;
420 enum drm_ctx_flags flags;
421};
422
423/**
424 * DRM_IOCTL_RES_CTX ioctl argument type.
425 */
426struct drm_ctx_res {
427 int count;
428 struct drm_ctx __user *contexts;
429};
430
431/**
432 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
433 */
434struct drm_draw {
435 drm_drawable_t handle;
436};
437
438/**
439 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
440 */
441typedef enum {
442 DRM_DRAWABLE_CLIPRECTS,
443} drm_drawable_info_type_t;
444
445struct drm_update_draw {
446 drm_drawable_t handle;
447 unsigned int type;
448 unsigned int num;
449 unsigned long long data;
450};
451
452/**
453 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
454 */
455struct drm_auth {
456 drm_magic_t magic;
457};
458
459/**
460 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
461 *
462 * \sa drmGetInterruptFromBusID().
463 */
464struct drm_irq_busid {
465 int irq; /**< IRQ number */
466 int busnum; /**< bus number */
467 int devnum; /**< device number */
468 int funcnum; /**< function number */
469};
470
471enum drm_vblank_seq_type {
472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
474 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
475 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
476 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
477};
478
479#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
480#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
481 _DRM_VBLANK_NEXTONMISS)
482
483struct drm_wait_vblank_request {
484 enum drm_vblank_seq_type type;
485 unsigned int sequence;
486 unsigned long signal;
487};
488
489struct drm_wait_vblank_reply {
490 enum drm_vblank_seq_type type;
491 unsigned int sequence;
492 long tval_sec;
493 long tval_usec;
494};
495
496/**
497 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
498 *
499 * \sa drmWaitVBlank().
500 */
501union drm_wait_vblank {
502 struct drm_wait_vblank_request request;
503 struct drm_wait_vblank_reply reply;
504};
505
506/**
507 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
508 *
509 * \sa drmAgpEnable().
510 */
511struct drm_agp_mode {
512 unsigned long mode; /**< AGP mode */
513};
514
515/**
516 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
517 *
518 * \sa drmAgpAlloc() and drmAgpFree().
519 */
520struct drm_agp_buffer {
521 unsigned long size; /**< In bytes -- will round to page boundary */
522 unsigned long handle; /**< Used for binding / unbinding */
523 unsigned long type; /**< Type of memory to allocate */
524 unsigned long physical; /**< Physical used by i810 */
525};
526
527/**
528 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
529 *
530 * \sa drmAgpBind() and drmAgpUnbind().
531 */
532struct drm_agp_binding {
533 unsigned long handle; /**< From drm_agp_buffer */
534 unsigned long offset; /**< In bytes -- will round to page boundary */
535};
536
537/**
538 * DRM_IOCTL_AGP_INFO ioctl argument type.
539 *
540 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
541 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
542 * drmAgpVendorId() and drmAgpDeviceId().
543 */
544struct drm_agp_info {
545 int agp_version_major;
546 int agp_version_minor;
547 unsigned long mode;
548 unsigned long aperture_base; /* physical address */
549 unsigned long aperture_size; /* bytes */
550 unsigned long memory_allowed; /* bytes */
551 unsigned long memory_used;
552
553 /* PCI information */
554 unsigned short id_vendor;
555 unsigned short id_device;
556};
557
558/**
559 * DRM_IOCTL_SG_ALLOC ioctl argument type.
560 */
561struct drm_scatter_gather {
562 unsigned long size; /**< In bytes -- will round to page boundary */
563 unsigned long handle; /**< Used for mapping / unmapping */
564};
565
566/**
567 * DRM_IOCTL_SET_VERSION ioctl argument type.
568 */
569struct drm_set_version {
570 int drm_di_major;
571 int drm_di_minor;
572 int drm_dd_major;
573 int drm_dd_minor;
574};
575
576#define DRM_IOCTL_BASE 'd'
577#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
578#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
579#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
580#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
581
582#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
583#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
584#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
585#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
586#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
587#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
588#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
589#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
590
591#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
592#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
593#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
594#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
595#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
596#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
597#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
598#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
599#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
600#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
601#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
602
603#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
604
605#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
606#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
607
608#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
609#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
610#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
611#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
612#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
613#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
614#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
615#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
616#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
617#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
618#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
619#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
620#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
621
622#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
623#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
624#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
625#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
626#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
627#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
628#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
629#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
630
631#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
632#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
633
634#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
635
636#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
637
638/**
639 * Device-specific ioctls should only be in their respective headers.
640 * The device-specific ioctl range is from 0x40 to 0x9f;
641 * generic ioctls restart at 0xA0.
642 *
643 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
644 * drmCommandReadWrite().
645 */
646#define DRM_COMMAND_BASE 0x40
647#define DRM_COMMAND_END 0xA0
648
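
DRM_COMMAND_BASE and DRM_COMMAND_END fence off that window for per-driver commands; a driver header defines its ioctls relative to the base so the generic dispatcher can route them. For example (a hypothetical driver, representative of how the per-driver *_drm.h headers use the range):

	#define DRM_MYDRV_INIT		0x00
	#define DRM_IOCTL_MYDRV_INIT	DRM_IOW(DRM_COMMAND_BASE + DRM_MYDRV_INIT, int)
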
649/* typedef area */
650#ifndef __KERNEL__
651typedef struct drm_clip_rect drm_clip_rect_t;
652typedef struct drm_drawable_info drm_drawable_info_t;
653typedef struct drm_tex_region drm_tex_region_t;
654typedef struct drm_hw_lock drm_hw_lock_t;
655typedef struct drm_version drm_version_t;
656typedef struct drm_unique drm_unique_t;
657typedef struct drm_list drm_list_t;
658typedef struct drm_block drm_block_t;
659typedef struct drm_control drm_control_t;
660typedef enum drm_map_type drm_map_type_t;
661typedef enum drm_map_flags drm_map_flags_t;
662typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
663typedef struct drm_map drm_map_t;
664typedef struct drm_client drm_client_t;
665typedef enum drm_stat_type drm_stat_type_t;
666typedef struct drm_stats drm_stats_t;
667typedef enum drm_lock_flags drm_lock_flags_t;
668typedef struct drm_lock drm_lock_t;
669typedef enum drm_dma_flags drm_dma_flags_t;
670typedef struct drm_buf_desc drm_buf_desc_t;
671typedef struct drm_buf_info drm_buf_info_t;
672typedef struct drm_buf_free drm_buf_free_t;
673typedef struct drm_buf_pub drm_buf_pub_t;
674typedef struct drm_buf_map drm_buf_map_t;
675typedef struct drm_dma drm_dma_t;
676typedef union drm_wait_vblank drm_wait_vblank_t;
677typedef struct drm_agp_mode drm_agp_mode_t;
678typedef enum drm_ctx_flags drm_ctx_flags_t;
679typedef struct drm_ctx drm_ctx_t;
680typedef struct drm_ctx_res drm_ctx_res_t;
681typedef struct drm_draw drm_draw_t;
682typedef struct drm_update_draw drm_update_draw_t;
683typedef struct drm_auth drm_auth_t;
684typedef struct drm_irq_busid drm_irq_busid_t;
685typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
686
687typedef struct drm_agp_buffer drm_agp_buffer_t;
688typedef struct drm_agp_binding drm_agp_binding_t;
689typedef struct drm_agp_info drm_agp_info_t;
690typedef struct drm_scatter_gather drm_scatter_gather_t;
691typedef struct drm_set_version drm_set_version_t;
692#endif
693
694#endif
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
deleted file mode 100644
index 0764b662b339..000000000000
--- a/drivers/char/drm/drmP.h
+++ /dev/null
@@ -1,1153 +0,0 @@
1/**
2 * \file drmP.h
3 * Private header for Direct Rendering Manager
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * All rights reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
31 * OTHER DEALINGS IN THE SOFTWARE.
32 */
33
34#ifndef _DRM_P_H_
35#define _DRM_P_H_
36
37/* If you want the memory alloc debug functionality, change define below */
38/* #define DEBUG_MEMORY */
39
40#ifdef __KERNEL__
41#ifdef __alpha__
42/* add include of current.h so that "current" is defined
43 * before static inline funcs in wait.h. Doing this so we
44 * can build the DRM (part of PI DRI). 4/21/2000 S + B */
45#include <asm/current.h>
46#endif /* __alpha__ */
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/miscdevice.h>
50#include <linux/fs.h>
51#include <linux/proc_fs.h>
52#include <linux/init.h>
53#include <linux/file.h>
54#include <linux/pci.h>
55#include <linux/jiffies.h>
56#include <linux/smp_lock.h> /* For (un)lock_kernel */
57#include <linux/dma-mapping.h>
58#include <linux/mm.h>
59#include <linux/cdev.h>
60#include <linux/mutex.h>
61#if defined(__alpha__) || defined(__powerpc__)
62#include <asm/pgtable.h> /* For pte_wrprotect */
63#endif
64#include <asm/io.h>
65#include <asm/mman.h>
66#include <asm/uaccess.h>
67#ifdef CONFIG_MTRR
68#include <asm/mtrr.h>
69#endif
70#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
71#include <linux/types.h>
72#include <linux/agp_backend.h>
73#endif
74#include <linux/workqueue.h>
75#include <linux/poll.h>
76#include <asm/pgalloc.h>
77#include "drm.h"
78
79#include <linux/idr.h>
80
81#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
82#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
83
84struct drm_file;
85struct drm_device;
86
87#include "drm_os_linux.h"
88#include "drm_hashtab.h"
89
90/***********************************************************************/
91/** \name DRM template customization defaults */
92/*@{*/
93
94/* driver capabilities and requirements mask */
95#define DRIVER_USE_AGP 0x1
96#define DRIVER_REQUIRE_AGP 0x2
97#define DRIVER_USE_MTRR 0x4
98#define DRIVER_PCI_DMA 0x8
99#define DRIVER_SG 0x10
100#define DRIVER_HAVE_DMA 0x20
101#define DRIVER_HAVE_IRQ 0x40
102#define DRIVER_IRQ_SHARED 0x80
103#define DRIVER_IRQ_VBL 0x100
104#define DRIVER_DMA_QUEUE 0x200
105#define DRIVER_FB_DMA 0x400
106#define DRIVER_IRQ_VBL2 0x800
107
108/***********************************************************************/
109/** \name Begin the DRM... */
110/*@{*/
111
112#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
113 also include looping detection. */
114
115#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
116#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
117#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
118#define DRM_LOOPING_LIMIT 5000000
119#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */
120#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */
121
122#define DRM_FLAG_DEBUG 0x01
123
124#define DRM_MEM_DMA 0
125#define DRM_MEM_SAREA 1
126#define DRM_MEM_DRIVER 2
127#define DRM_MEM_MAGIC 3
128#define DRM_MEM_IOCTLS 4
129#define DRM_MEM_MAPS 5
130#define DRM_MEM_VMAS 6
131#define DRM_MEM_BUFS 7
132#define DRM_MEM_SEGS 8
133#define DRM_MEM_PAGES 9
134#define DRM_MEM_FILES 10
135#define DRM_MEM_QUEUES 11
136#define DRM_MEM_CMDS 12
137#define DRM_MEM_MAPPINGS 13
138#define DRM_MEM_BUFLISTS 14
139#define DRM_MEM_AGPLISTS 15
140#define DRM_MEM_TOTALAGP 16
141#define DRM_MEM_BOUNDAGP 17
142#define DRM_MEM_CTXBITMAP 18
143#define DRM_MEM_STUB 19
144#define DRM_MEM_SGLISTS 20
145#define DRM_MEM_CTXLIST 21
146#define DRM_MEM_MM 22
147#define DRM_MEM_HASHTAB 23
148
149#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
150#define DRM_MAP_HASH_OFFSET 0x10000000
151
152/*@}*/
153
154/***********************************************************************/
155/** \name Macros to make printk easier */
156/*@{*/
157
158/**
159 * Error output.
160 *
161 * \param fmt printf() like format string.
162 * \param arg arguments
163 */
164#define DRM_ERROR(fmt, arg...) \
165 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
166
167/**
168 * Memory error output.
169 *
170 * \param area memory area where the error occurred.
171 * \param fmt printf() like format string.
172 * \param arg arguments
173 */
174#define DRM_MEM_ERROR(area, fmt, arg...) \
175 printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
176 drm_mem_stats[area].name , ##arg)
177
178#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
179
180/**
181 * Debug output.
182 *
183 * \param fmt printf() like format string.
184 * \param arg arguments
185 */
186#if DRM_DEBUG_CODE
187#define DRM_DEBUG(fmt, arg...) \
188 do { \
189 if ( drm_debug ) \
190 printk(KERN_DEBUG \
191 "[" DRM_NAME ":%s] " fmt , \
192 __func__ , ##arg); \
193 } while (0)
194#else
195#define DRM_DEBUG(fmt, arg...) do { } while (0)
196#endif
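/*
 * Example usage (illustrative, not part of the original header): the
 * logging macros above take printk()-style format strings:
 *
 *	DRM_INFO("Initialized %s %d.%d.%d\n", driver->name,
 *		 driver->major, driver->minor, driver->patchlevel);
 *	if (ret < 0)
 *		DRM_ERROR("IRQ install failed: %d\n", ret);
 *	DRM_DEBUG("context %d switched in\n", ctx);
 */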
197
198#define DRM_PROC_LIMIT (PAGE_SIZE-80)
199
200#define DRM_PROC_PRINT(fmt, arg...) \
201 len += sprintf(&buf[len], fmt , ##arg); \
202 if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
203
204#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
205 len += sprintf(&buf[len], fmt , ##arg); \
206 if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
207
208/*@}*/
209
210/***********************************************************************/
211/** \name Internal types and structures */
212/*@{*/
213
214#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
215
216#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
217#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
218#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
219
220#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
221/**
222 * Get the private SAREA mapping.
223 *
224 * \param _dev DRM device.
225 * \param _ctx context number.
226 * \param _map output mapping.
227 */
228#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
229 (_map) = (_dev)->context_sareas[_ctx]; \
230} while(0)
231
232/**
 233 * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
234 *
235 * \param dev DRM device.
 236 * \param file_priv DRM file private of the caller.
237 */
238#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \
239do { \
240 if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
241 dev->lock.file_priv != file_priv ) { \
242 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
243 __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
244 dev->lock.file_priv, file_priv ); \
245 return -EINVAL; \
246 } \
247} while (0)
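/*
 * Sketch of the intended call pattern (assumed usage; the handler name
 * is invented): a driver ioctl that touches hardware state verifies
 * the lock first and bails out early if the caller does not hold it:
 *
 *	static int foo_dma_flush(struct drm_device *dev, void *data,
 *				 struct drm_file *file_priv)
 *	{
 *		LOCK_TEST_WITH_RETURN(dev, file_priv);
 *		... dispatch buffers to the hardware ...
 *		return 0;
 *	}
 */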
248
249/**
 250 * Copy an IOCTL return string to user space
251 */
252#define DRM_COPY( name, value ) \
253 len = strlen( value ); \
254 if ( len > name##_len ) len = name##_len; \
255 name##_len = strlen( value ); \
256 if ( len && name ) { \
257 if ( copy_to_user( name, value, len ) ) \
258 return -EFAULT; \
259 }
260
261/**
262 * Ioctl function type.
263 *
 264 * \param dev DRM device.
 265 * \param data ioctl argument data, copied in from user space by drm_ioctl().
 266 * \param file_priv DRM file private pointer.
 267 *
268 */
269typedef int drm_ioctl_t(struct drm_device *dev, void *data,
270 struct drm_file *file_priv);
271
272typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
273 unsigned long arg);
274
275#define DRM_AUTH 0x1
276#define DRM_MASTER 0x2
277#define DRM_ROOT_ONLY 0x4
278
279struct drm_ioctl_desc {
280 unsigned int cmd;
281 drm_ioctl_t *func;
282 int flags;
283};
284
285/**
286 * Creates a driver or general drm_ioctl_desc array entry for the given
287 * ioctl, for use by drm_ioctl().
288 */
289#define DRM_IOCTL_DEF(ioctl, func, flags) \
290 [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
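/*
 * Hypothetical example (ioctl numbers and handlers invented): a driver
 * builds its table with DRM_IOCTL_DEF so each entry lands at the slot
 * drm_ioctl() indexes via DRM_IOCTL_NR():
 *
 *	struct drm_ioctl_desc foo_ioctls[] = {
 *		DRM_IOCTL_DEF(DRM_IOCTL_FOO_INIT, foo_init, DRM_AUTH),
 *		DRM_IOCTL_DEF(DRM_IOCTL_FOO_FLUSH, foo_flush,
 *			      DRM_AUTH | DRM_MASTER),
 *	};
 *	int foo_max_ioctl = DRM_ARRAY_SIZE(foo_ioctls);
 */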
291
292struct drm_magic_entry {
293 struct list_head head;
294 struct drm_hash_item hash_item;
295 struct drm_file *priv;
296};
297
298struct drm_vma_entry {
299 struct list_head head;
300 struct vm_area_struct *vma;
301 pid_t pid;
302};
303
304/**
305 * DMA buffer.
306 */
307struct drm_buf {
308 int idx; /**< Index into master buflist */
309 int total; /**< Buffer size */
310 int order; /**< log-base-2(total) */
311 int used; /**< Amount of buffer in use (for DMA) */
312 unsigned long offset; /**< Byte offset (used internally) */
313 void *address; /**< Address of buffer */
314 unsigned long bus_address; /**< Bus address of buffer */
315 struct drm_buf *next; /**< Kernel-only: used for free list */
316 __volatile__ int waiting; /**< On kernel DMA queue */
317 __volatile__ int pending; /**< On hardware DMA queue */
318 wait_queue_head_t dma_wait; /**< Processes waiting */
319 struct drm_file *file_priv; /**< Private of holding file descr */
320 int context; /**< Kernel queue for this buffer */
321 int while_locked; /**< Dispatch this buffer while locked */
322 enum {
323 DRM_LIST_NONE = 0,
324 DRM_LIST_FREE = 1,
325 DRM_LIST_WAIT = 2,
326 DRM_LIST_PEND = 3,
327 DRM_LIST_PRIO = 4,
328 DRM_LIST_RECLAIM = 5
329 } list; /**< Which list we're on */
330
331 int dev_priv_size; /**< Size of buffer private storage */
332 void *dev_private; /**< Per-buffer private storage */
333};
334
335/** The bufs array is one entry longer than strictly necessary */
336struct drm_waitlist {
337 int count; /**< Number of possible buffers */
338 struct drm_buf **bufs; /**< List of pointers to buffers */
339 struct drm_buf **rp; /**< Read pointer */
340 struct drm_buf **wp; /**< Write pointer */
341 struct drm_buf **end; /**< End pointer */
342 spinlock_t read_lock;
343 spinlock_t write_lock;
344};
345
346struct drm_freelist {
347 int initialized; /**< Freelist in use */
348 atomic_t count; /**< Number of free buffers */
 349	struct drm_buf *next;	       /**< Head of the free list */
350
351 wait_queue_head_t waiting; /**< Processes waiting on free bufs */
352 int low_mark; /**< Low water mark */
353 int high_mark; /**< High water mark */
354 atomic_t wfh; /**< If waiting for high mark */
355 spinlock_t lock;
356};
357
358typedef struct drm_dma_handle {
359 dma_addr_t busaddr;
360 void *vaddr;
361 size_t size;
362} drm_dma_handle_t;
363
364/**
 365 * Buffer entry.  There is one of these for each buffer size order.
366 */
367struct drm_buf_entry {
368 int buf_size; /**< size */
369 int buf_count; /**< number of buffers */
370 struct drm_buf *buflist; /**< buffer list */
371 int seg_count;
372 int page_order;
373 struct drm_dma_handle **seglist;
374
375 struct drm_freelist freelist;
376};
377
378/** File private data */
379struct drm_file {
380 int authenticated;
381 int master;
382 pid_t pid;
383 uid_t uid;
384 drm_magic_t magic;
385 unsigned long ioctl_count;
386 struct list_head lhead;
387 struct drm_minor *minor;
388 int remove_auth_on_close;
389 unsigned long lock_count;
390 struct file *filp;
391 void *driver_priv;
392};
393
394/** Wait queue */
395struct drm_queue {
396 atomic_t use_count; /**< Outstanding uses (+1) */
397 atomic_t finalization; /**< Finalization in progress */
398 atomic_t block_count; /**< Count of processes waiting */
399 atomic_t block_read; /**< Queue blocked for reads */
400 wait_queue_head_t read_queue; /**< Processes waiting on block_read */
401 atomic_t block_write; /**< Queue blocked for writes */
402 wait_queue_head_t write_queue; /**< Processes waiting on block_write */
403 atomic_t total_queued; /**< Total queued statistic */
404 atomic_t total_flushed; /**< Total flushes statistic */
405 atomic_t total_locks; /**< Total locks statistics */
406 enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
407 struct drm_waitlist waitlist; /**< Pending buffers */
408 wait_queue_head_t flush_queue; /**< Processes waiting until flush */
409};
410
411/**
412 * Lock data.
413 */
414struct drm_lock_data {
415 struct drm_hw_lock *hw_lock; /**< Hardware lock */
416 /** Private of lock holder's file (NULL=kernel) */
417 struct drm_file *file_priv;
418 wait_queue_head_t lock_queue; /**< Queue of blocked processes */
419 unsigned long lock_time; /**< Time of last lock in jiffies */
420 spinlock_t spinlock;
421 uint32_t kernel_waiters;
422 uint32_t user_waiters;
423 int idle_has_lock;
424};
425
426/**
427 * DMA data.
428 */
429struct drm_device_dma {
430
431 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
432 int buf_count; /**< total number of buffers */
433 struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
434 int seg_count;
435 int page_count; /**< number of pages */
436 unsigned long *pagelist; /**< page list */
437 unsigned long byte_count;
438 enum {
439 _DRM_DMA_USE_AGP = 0x01,
440 _DRM_DMA_USE_SG = 0x02,
441 _DRM_DMA_USE_FB = 0x04,
442 _DRM_DMA_USE_PCI_RO = 0x08
443 } flags;
444
445};
446
447/**
 448 * AGP memory entry, kept on a doubly linked list.
449 */
450struct drm_agp_mem {
451 unsigned long handle; /**< handle */
452 DRM_AGP_MEM *memory;
453 unsigned long bound; /**< address */
454 int pages;
455 struct list_head head;
456};
457
458/**
459 * AGP data.
460 *
461 * \sa drm_agp_init() and drm_device::agp.
462 */
463struct drm_agp_head {
464 DRM_AGP_KERN agp_info; /**< AGP device information */
465 struct list_head memory;
466 unsigned long mode; /**< AGP mode */
467 struct agp_bridge_data *bridge;
 468	int enabled;			/**< whether the AGP bus has been enabled */
469 int acquired; /**< whether the AGP device has been acquired */
470 unsigned long base;
471 int agp_mtrr;
472 int cant_use_aperture;
473 unsigned long page_mask;
474};
475
476/**
477 * Scatter-gather memory.
478 */
479struct drm_sg_mem {
480 unsigned long handle;
481 void *virtual;
482 int pages;
483 struct page **pagelist;
484 dma_addr_t *busaddr;
485};
486
487struct drm_sigdata {
488 int context;
489 struct drm_hw_lock *lock;
490};
491
492
493/*
494 * Generic memory manager structs
495 */
496
497struct drm_mm_node {
498 struct list_head fl_entry;
499 struct list_head ml_entry;
500 int free;
501 unsigned long start;
502 unsigned long size;
503 struct drm_mm *mm;
504 void *private;
505};
506
507struct drm_mm {
508 struct list_head fl_entry;
509 struct list_head ml_entry;
510};
511
512
513/**
514 * Mappings list
515 */
516struct drm_map_list {
517 struct list_head head; /**< list head */
518 struct drm_hash_item hash;
519 struct drm_map *map; /**< mapping */
520 uint64_t user_token;
521};
522
523typedef struct drm_map drm_local_map_t;
524
525/**
526 * Context handle list
527 */
528struct drm_ctx_list {
529 struct list_head head; /**< list head */
530 drm_context_t handle; /**< context handle */
531 struct drm_file *tag; /**< associated fd private data */
532};
533
534struct drm_vbl_sig {
535 struct list_head head;
536 unsigned int sequence;
537 struct siginfo info;
538 struct task_struct *task;
539};
540
541/* location of GART table */
542#define DRM_ATI_GART_MAIN 1
543#define DRM_ATI_GART_FB 2
544
545#define DRM_ATI_GART_PCI 1
546#define DRM_ATI_GART_PCIE 2
547#define DRM_ATI_GART_IGP 3
548
549struct drm_ati_pcigart_info {
550 int gart_table_location;
551 int gart_reg_if;
552 void *addr;
553 dma_addr_t bus_addr;
554 dma_addr_t table_mask;
555 struct drm_dma_handle *table_handle;
556 drm_local_map_t mapping;
557 int table_size;
558};
559
560/**
 561 * DRM driver structure. This structure represents the common code for
 562 * a family of cards. There will be one drm_device for each card present
 563 * in this family.
564 */
565struct drm_driver {
566 int (*load) (struct drm_device *, unsigned long flags);
567 int (*firstopen) (struct drm_device *);
568 int (*open) (struct drm_device *, struct drm_file *);
569 void (*preclose) (struct drm_device *, struct drm_file *file_priv);
570 void (*postclose) (struct drm_device *, struct drm_file *);
571 void (*lastclose) (struct drm_device *);
572 int (*unload) (struct drm_device *);
573 int (*suspend) (struct drm_device *, pm_message_t state);
574 int (*resume) (struct drm_device *);
575 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
576 void (*dma_ready) (struct drm_device *);
577 int (*dma_quiescent) (struct drm_device *);
578 int (*context_ctor) (struct drm_device *dev, int context);
579 int (*context_dtor) (struct drm_device *dev, int context);
580 int (*kernel_context_switch) (struct drm_device *dev, int old,
581 int new);
582 void (*kernel_context_switch_unlock) (struct drm_device *dev);
583 int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
584 int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
585 int (*dri_library_name) (struct drm_device *dev, char *buf);
586
587 /**
588 * Called by \c drm_device_is_agp. Typically used to determine if a
589 * card is really attached to AGP or not.
590 *
591 * \param dev DRM device handle
592 *
593 * \returns
594 * One of three values is returned depending on whether or not the
595 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
596 * (return of 1), or may or may not be AGP (return of 2).
597 */
598 int (*device_is_agp) (struct drm_device *dev);
599
600 /* these have to be filled in */
601
602 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
603 void (*irq_preinstall) (struct drm_device *dev);
604 void (*irq_postinstall) (struct drm_device *dev);
605 void (*irq_uninstall) (struct drm_device *dev);
606 void (*reclaim_buffers) (struct drm_device *dev,
607 struct drm_file * file_priv);
608 void (*reclaim_buffers_locked) (struct drm_device *dev,
609 struct drm_file *file_priv);
610 void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
611 struct drm_file *file_priv);
612 unsigned long (*get_map_ofs) (struct drm_map * map);
613 unsigned long (*get_reg_ofs) (struct drm_device *dev);
614 void (*set_version) (struct drm_device *dev,
615 struct drm_set_version *sv);
616
617 int major;
618 int minor;
619 int patchlevel;
620 char *name;
621 char *desc;
622 char *date;
623
624 u32 driver_features;
625 int dev_priv_size;
626 struct drm_ioctl_desc *ioctls;
627 int num_ioctls;
628 struct file_operations fops;
629 struct pci_driver pci_driver;
630};
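/*
 * Minimal initialization sketch (assumed usage; the foo_* hooks are
 * illustrative): a driver fills in its feature mask, the hooks it
 * implements and its version information, leaving the rest zeroed:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ,
 *		.load = foo_load,
 *		.irq_handler = foo_irq_handler,
 *		.get_map_ofs = drm_core_get_map_ofs,
 *		.get_reg_ofs = drm_core_get_reg_ofs,
 *		.name = "foo", .desc = "Example device", .date = "20080101",
 *		.major = 1, .minor = 0, .patchlevel = 0,
 *	};
 */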
631
632#define DRM_MINOR_UNASSIGNED 0
633#define DRM_MINOR_LEGACY 1
634
635/**
636 * DRM minor structure. This structure represents a drm minor number.
637 */
638struct drm_minor {
639 int index; /**< Minor device number */
640 int type; /**< Control or render */
641 dev_t device; /**< Device number for mknod */
642 struct device kdev; /**< Linux device */
643 struct drm_device *dev;
644 struct proc_dir_entry *dev_root; /**< proc directory entry */
645};
646
647/**
 648 * DRM device structure. This structure represents a complete card that
649 * may contain multiple heads.
650 */
651struct drm_device {
652 char *unique; /**< Unique identifier: e.g., busid */
653 int unique_len; /**< Length of unique field */
654 char *devname; /**< For /proc/interrupts */
655 int if_version; /**< Highest interface version set */
656
657 int blocked; /**< Blocked due to VC switch? */
658
659 /** \name Locks */
660 /*@{ */
661 spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
662 struct mutex struct_mutex; /**< For others */
663 /*@} */
664
665 /** \name Usage Counters */
666 /*@{ */
667 int open_count; /**< Outstanding files open */
668 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
669 atomic_t vma_count; /**< Outstanding vma areas open */
670 int buf_use; /**< Buffers in use -- cannot alloc */
671 atomic_t buf_alloc; /**< Buffer allocation in progress */
672 /*@} */
673
674 /** \name Performance counters */
675 /*@{ */
676 unsigned long counters;
677 enum drm_stat_type types[15];
678 atomic_t counts[15];
679 /*@} */
680
681 /** \name Authentication */
682 /*@{ */
683 struct list_head filelist;
684 struct drm_open_hash magiclist; /**< magic hash table */
685 struct list_head magicfree;
686 /*@} */
687
688 /** \name Memory management */
689 /*@{ */
690 struct list_head maplist; /**< Linked list of regions */
691 int map_count; /**< Number of mappable regions */
692 struct drm_open_hash map_hash; /**< User token hash table for maps */
693
694 /** \name Context handle management */
695 /*@{ */
696 struct list_head ctxlist; /**< Linked list of context handles */
697 int ctx_count; /**< Number of context handles */
698 struct mutex ctxlist_mutex; /**< For ctxlist */
699
700 struct idr ctx_idr;
701
702 struct list_head vmalist; /**< List of vmas (for debugging) */
703 struct drm_lock_data lock; /**< Information on hardware lock */
704 /*@} */
705
706 /** \name DMA queues (contexts) */
707 /*@{ */
708 int queue_count; /**< Number of active DMA queues */
709 int queue_reserved; /**< Number of reserved DMA queues */
710 int queue_slots; /**< Actual length of queuelist */
711 struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
712 struct drm_device_dma *dma; /**< Optional pointer for DMA support */
713 /*@} */
714
715 /** \name Context support */
716 /*@{ */
717 int irq; /**< Interrupt used by board */
718 int irq_enabled; /**< True if irq handler is enabled */
719 __volatile__ long context_flag; /**< Context swapping flag */
720 __volatile__ long interrupt_flag; /**< Interruption handler flag */
721 __volatile__ long dma_flag; /**< DMA dispatch flag */
722 struct timer_list timer; /**< Timer for delaying ctx switch */
723 wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
724 int last_checked; /**< Last context checked for DMA */
725 int last_context; /**< Last current context */
726 unsigned long last_switch; /**< jiffies at last context switch */
727 /*@} */
728
729 struct work_struct work;
730 /** \name VBLANK IRQ support */
731 /*@{ */
732
733 wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
734 atomic_t vbl_received;
735 atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
736 spinlock_t vbl_lock;
737 struct list_head vbl_sigs; /**< signal list to send on VBLANK */
738 struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
739 unsigned int vbl_pending;
740 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
741 void (*locked_tasklet_func)(struct drm_device *dev);
742
743 /*@} */
744 cycles_t ctx_start;
745 cycles_t lck_start;
746
747 struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
748 wait_queue_head_t buf_readers; /**< Processes waiting to read */
749 wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
750
751 struct drm_agp_head *agp; /**< AGP data */
752
753 struct pci_dev *pdev; /**< PCI device structure */
754 int pci_vendor; /**< PCI vendor id */
755 int pci_device; /**< PCI device id */
756#ifdef __alpha__
757 struct pci_controller *hose;
758#endif
759 struct drm_sg_mem *sg; /**< Scatter gather memory */
760 void *dev_private; /**< device private data */
761 struct drm_sigdata sigdata; /**< For block_all_signals */
762 sigset_t sigmask;
763
764 struct drm_driver *driver;
765 drm_local_map_t *agp_buffer_map;
766 unsigned int agp_buffer_token;
767 struct drm_minor *primary; /**< render type primary screen head */
768
769 /** \name Drawable information */
770 /*@{ */
771 spinlock_t drw_lock;
772 struct idr drw_idr;
773 /*@} */
774};
775
776static __inline__ int drm_core_check_feature(struct drm_device *dev,
777 int feature)
778{
779 return ((dev->driver->driver_features & feature) ? 1 : 0);
780}
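/*
 * Typical use (illustrative): gate an optional path on a feature bit
 * from the DRIVER_* mask above, e.g.
 *
 *	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 *		return -EINVAL;
 */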
781
782#ifdef __alpha__
783#define drm_get_pci_domain(dev) dev->hose->index
784#else
785#define drm_get_pci_domain(dev) 0
786#endif
787
788#if __OS_HAS_AGP
789static inline int drm_core_has_AGP(struct drm_device *dev)
790{
791 return drm_core_check_feature(dev, DRIVER_USE_AGP);
792}
793#else
794#define drm_core_has_AGP(dev) (0)
795#endif
796
797#if __OS_HAS_MTRR
798static inline int drm_core_has_MTRR(struct drm_device *dev)
799{
800 return drm_core_check_feature(dev, DRIVER_USE_MTRR);
801}
802
803#define DRM_MTRR_WC MTRR_TYPE_WRCOMB
804
805static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
806 unsigned int flags)
807{
808 return mtrr_add(offset, size, flags, 1);
809}
810
811static inline int drm_mtrr_del(int handle, unsigned long offset,
812 unsigned long size, unsigned int flags)
813{
814 return mtrr_del(handle, offset, size);
815}
816
817#else
818#define drm_core_has_MTRR(dev) (0)
819
820#define DRM_MTRR_WC 0
821
822static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
823 unsigned int flags)
824{
825 return 0;
826}
827
828static inline int drm_mtrr_del(int handle, unsigned long offset,
829 unsigned long size, unsigned int flags)
830{
831 return 0;
832}
833#endif
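/*
 * Usage sketch (assumed; dev_priv->mtrr is an invented field): mark
 * the framebuffer aperture write-combining, relying on the stubs
 * above to make this a no-op on kernels without MTRR support:
 *
 *	dev_priv->mtrr = drm_mtrr_add(fb_base, fb_size, DRM_MTRR_WC);
 *	...
 *	drm_mtrr_del(dev_priv->mtrr, fb_base, fb_size, DRM_MTRR_WC);
 */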
834
835/******************************************************************/
836/** \name Internal function definitions */
837/*@{*/
838
839 /* Driver support (drm_drv.h) */
840extern int drm_init(struct drm_driver *driver);
841extern void drm_exit(struct drm_driver *driver);
842extern int drm_ioctl(struct inode *inode, struct file *filp,
843 unsigned int cmd, unsigned long arg);
844extern long drm_compat_ioctl(struct file *filp,
845 unsigned int cmd, unsigned long arg);
846extern int drm_lastclose(struct drm_device *dev);
847
848 /* Device support (drm_fops.h) */
849extern int drm_open(struct inode *inode, struct file *filp);
850extern int drm_stub_open(struct inode *inode, struct file *filp);
851extern int drm_fasync(int fd, struct file *filp, int on);
852extern int drm_release(struct inode *inode, struct file *filp);
853
854 /* Mapping support (drm_vm.h) */
855extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
856extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
857extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
858extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
859
860 /* Memory management support (drm_memory.h) */
861#include "drm_memory.h"
862extern void drm_mem_init(void);
863extern int drm_mem_info(char *buf, char **start, off_t offset,
864 int request, int *eof, void *data);
865extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
866
867extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
868extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
869extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
870extern int drm_unbind_agp(DRM_AGP_MEM * handle);
871
872 /* Misc. IOCTL support (drm_ioctl.h) */
873extern int drm_irq_by_busid(struct drm_device *dev, void *data,
874 struct drm_file *file_priv);
875extern int drm_getunique(struct drm_device *dev, void *data,
876 struct drm_file *file_priv);
877extern int drm_setunique(struct drm_device *dev, void *data,
878 struct drm_file *file_priv);
879extern int drm_getmap(struct drm_device *dev, void *data,
880 struct drm_file *file_priv);
881extern int drm_getclient(struct drm_device *dev, void *data,
882 struct drm_file *file_priv);
883extern int drm_getstats(struct drm_device *dev, void *data,
884 struct drm_file *file_priv);
885extern int drm_setversion(struct drm_device *dev, void *data,
886 struct drm_file *file_priv);
887extern int drm_noop(struct drm_device *dev, void *data,
888 struct drm_file *file_priv);
889
890 /* Context IOCTL support (drm_context.h) */
891extern int drm_resctx(struct drm_device *dev, void *data,
892 struct drm_file *file_priv);
893extern int drm_addctx(struct drm_device *dev, void *data,
894 struct drm_file *file_priv);
895extern int drm_modctx(struct drm_device *dev, void *data,
896 struct drm_file *file_priv);
897extern int drm_getctx(struct drm_device *dev, void *data,
898 struct drm_file *file_priv);
899extern int drm_switchctx(struct drm_device *dev, void *data,
900 struct drm_file *file_priv);
901extern int drm_newctx(struct drm_device *dev, void *data,
902 struct drm_file *file_priv);
903extern int drm_rmctx(struct drm_device *dev, void *data,
904 struct drm_file *file_priv);
905
906extern int drm_ctxbitmap_init(struct drm_device *dev);
907extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
908extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
909
910extern int drm_setsareactx(struct drm_device *dev, void *data,
911 struct drm_file *file_priv);
912extern int drm_getsareactx(struct drm_device *dev, void *data,
913 struct drm_file *file_priv);
914
915 /* Drawable IOCTL support (drm_drawable.h) */
916extern int drm_adddraw(struct drm_device *dev, void *data,
917 struct drm_file *file_priv);
918extern int drm_rmdraw(struct drm_device *dev, void *data,
919 struct drm_file *file_priv);
920extern int drm_update_drawable_info(struct drm_device *dev, void *data,
921 struct drm_file *file_priv);
922extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
923 drm_drawable_t id);
924extern void drm_drawable_free_all(struct drm_device *dev);
925
926 /* Authentication IOCTL support (drm_auth.h) */
927extern int drm_getmagic(struct drm_device *dev, void *data,
928 struct drm_file *file_priv);
929extern int drm_authmagic(struct drm_device *dev, void *data,
930 struct drm_file *file_priv);
931
932 /* Locking IOCTL support (drm_lock.h) */
933extern int drm_lock(struct drm_device *dev, void *data,
934 struct drm_file *file_priv);
935extern int drm_unlock(struct drm_device *dev, void *data,
936 struct drm_file *file_priv);
937extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
938extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
939extern void drm_idlelock_take(struct drm_lock_data *lock_data);
940extern void drm_idlelock_release(struct drm_lock_data *lock_data);
941
942/*
943 * These are exported to drivers so that they can implement fencing using
 944 * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
945 */
946
947extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
948
949 /* Buffer management support (drm_bufs.h) */
950extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
951extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
952extern int drm_addmap(struct drm_device *dev, unsigned int offset,
953 unsigned int size, enum drm_map_type type,
954 enum drm_map_flags flags, drm_local_map_t ** map_ptr);
955extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
956 struct drm_file *file_priv);
957extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
958extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
959extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
960 struct drm_file *file_priv);
961extern int drm_addbufs(struct drm_device *dev, void *data,
962 struct drm_file *file_priv);
963extern int drm_infobufs(struct drm_device *dev, void *data,
964 struct drm_file *file_priv);
965extern int drm_markbufs(struct drm_device *dev, void *data,
966 struct drm_file *file_priv);
967extern int drm_freebufs(struct drm_device *dev, void *data,
968 struct drm_file *file_priv);
969extern int drm_mapbufs(struct drm_device *dev, void *data,
970 struct drm_file *file_priv);
971extern int drm_order(unsigned long size);
972extern unsigned long drm_get_resource_start(struct drm_device *dev,
973 unsigned int resource);
974extern unsigned long drm_get_resource_len(struct drm_device *dev,
975 unsigned int resource);
976
977 /* DMA support (drm_dma.h) */
978extern int drm_dma_setup(struct drm_device *dev);
979extern void drm_dma_takedown(struct drm_device *dev);
980extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
981extern void drm_core_reclaim_buffers(struct drm_device *dev,
982 struct drm_file *filp);
983
984 /* IRQ support (drm_irq.h) */
985extern int drm_control(struct drm_device *dev, void *data,
986 struct drm_file *file_priv);
987extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
988extern int drm_irq_uninstall(struct drm_device *dev);
989extern void drm_driver_irq_preinstall(struct drm_device *dev);
990extern void drm_driver_irq_postinstall(struct drm_device *dev);
991extern void drm_driver_irq_uninstall(struct drm_device *dev);
992
993extern int drm_wait_vblank(struct drm_device *dev, void *data,
994 struct drm_file *file_priv);
995extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
996extern void drm_vbl_send_signals(struct drm_device *dev);
997extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
998
999 /* AGP/GART support (drm_agpsupport.h) */
1000extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
1001extern int drm_agp_acquire(struct drm_device *dev);
1002extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
1003 struct drm_file *file_priv);
1004extern int drm_agp_release(struct drm_device *dev);
1005extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
1006 struct drm_file *file_priv);
1007extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
1008extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
1009 struct drm_file *file_priv);
1010extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
1011extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
1012 struct drm_file *file_priv);
1013extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
1014extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
1015 struct drm_file *file_priv);
1016extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
1017extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
1018 struct drm_file *file_priv);
1019extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
1020extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
1021 struct drm_file *file_priv);
1022extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
1023extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
1024 struct drm_file *file_priv);
1025extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
1026extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
1027extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
1028extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
1029
1030 /* Stub support (drm_stub.h) */
1031extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
1032 struct drm_driver *driver);
1033extern int drm_put_dev(struct drm_device *dev);
1034extern int drm_put_minor(struct drm_minor **minor);
1035extern unsigned int drm_debug;
1036
1037extern struct class *drm_class;
1038extern struct proc_dir_entry *drm_proc_root;
1039
1040extern struct idr drm_minors_idr;
1041
1042extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
1043
1044 /* Proc support (drm_proc.h) */
1045extern int drm_proc_init(struct drm_minor *minor, int minor_id,
1046 struct proc_dir_entry *root);
1047extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
1048
1049 /* Scatter Gather Support (drm_scatter.h) */
1050extern void drm_sg_cleanup(struct drm_sg_mem * entry);
1051extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
1052 struct drm_file *file_priv);
1053extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
1054extern int drm_sg_free(struct drm_device *dev, void *data,
1055 struct drm_file *file_priv);
1056
1057 /* ATI PCIGART support (ati_pcigart.h) */
1058extern int drm_ati_pcigart_init(struct drm_device *dev,
1059 struct drm_ati_pcigart_info * gart_info);
1060extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
1061 struct drm_ati_pcigart_info * gart_info);
1062
1063extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
1064 size_t align, dma_addr_t maxaddr);
1065extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1066extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1067
1068 /* sysfs support (drm_sysfs.c) */
1069struct drm_sysfs_class;
1070extern struct class *drm_sysfs_create(struct module *owner, char *name);
1071extern void drm_sysfs_destroy(void);
1072extern int drm_sysfs_device_add(struct drm_minor *minor);
1073extern void drm_sysfs_device_remove(struct drm_minor *minor);
1074
1075/*
1076 * Basic memory manager support (drm_mm.c)
1077 */
1078extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
1079 unsigned long size,
1080 unsigned alignment);
1081extern void drm_mm_put_block(struct drm_mm_node * cur);
1082extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
1083 unsigned alignment, int best_match);
1084extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
1085extern void drm_mm_takedown(struct drm_mm *mm);
1086extern int drm_mm_clean(struct drm_mm *mm);
1087extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
1088extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
1089extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
1090
1091extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
1092extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
1093
1094static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
1095 unsigned int token)
1096{
1097 struct drm_map_list *_entry;
1098 list_for_each_entry(_entry, &dev->maplist, head)
1099 if (_entry->user_token == token)
1100 return _entry->map;
1101 return NULL;
1102}
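/*
 * Illustrative lookup (the sarea_offset field is invented): resolve a
 * user-space map handle passed in an ioctl back to its kernel mapping:
 *
 *	struct drm_map *map = drm_core_findmap(dev, init->sarea_offset);
 *	if (!map)
 *		return -EINVAL;
 */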
1103
1104static __inline__ int drm_device_is_agp(struct drm_device *dev)
1105{
1106 if (dev->driver->device_is_agp != NULL) {
1107 int err = (*dev->driver->device_is_agp) (dev);
1108
1109 if (err != 2) {
1110 return err;
1111 }
1112 }
1113
1114 return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
1115}
1116
1117static __inline__ int drm_device_is_pcie(struct drm_device *dev)
1118{
1119 return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
1120}
1121
1122static __inline__ void drm_core_dropmap(struct drm_map *map)
1123{
1124}
1125
1126#ifndef DEBUG_MEMORY
1127/** Wrapper around kmalloc() */
1128static __inline__ void *drm_alloc(size_t size, int area)
1129{
1130 return kmalloc(size, GFP_KERNEL);
1131}
1132
1133/** Wrapper around kfree() */
1134static __inline__ void drm_free(void *pt, size_t size, int area)
1135{
1136 kfree(pt);
1137}
1138
1139/** Wrapper around kcalloc() */
1140static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area)
1141{
1142 return kcalloc(nmemb, size, GFP_KERNEL);
1143}
1144#else
1145extern void *drm_alloc(size_t size, int area);
1146extern void drm_free(void *pt, size_t size, int area);
1147extern void *drm_calloc(size_t nmemb, size_t size, int area);
1148#endif
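/*
 * Usage note (illustrative): allocations carry a DRM_MEM_* area tag so
 * the DEBUG_MEMORY build can account them per category, and the size
 * must be passed back to drm_free() for the same bookkeeping:
 *
 *	buf = drm_alloc(count * sizeof(*buf), DRM_MEM_BUFS);
 *	...
 *	drm_free(buf, count * sizeof(*buf), DRM_MEM_BUFS);
 */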
1149
1150/*@}*/
1151
1152#endif /* __KERNEL__ */
1153#endif
diff --git a/drivers/char/drm/drm_core.h b/drivers/char/drm/drm_core.h
deleted file mode 100644
index 316739036079..000000000000
--- a/drivers/char/drm/drm_core.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
24
25#define CORE_NAME "drm"
26#define CORE_DESC "DRM shared core routines"
27#define CORE_DATE "20060810"
28
29#define DRM_IF_MAJOR 1
30#define DRM_IF_MINOR 3
31
32#define CORE_MAJOR 1
33#define CORE_MINOR 1
34#define CORE_PATCHLEVEL 0
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
deleted file mode 100644
index cd2b189e1be6..000000000000
--- a/drivers/char/drm/drm_hashtab.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
 29 * Simple open hash table implementation.
30 *
31 * Authors:
32 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33 */
34
35#ifndef DRM_HASHTAB_H
36#define DRM_HASHTAB_H
37
38#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
39
40struct drm_hash_item {
41 struct hlist_node head;
42 unsigned long key;
43};
44
45struct drm_open_hash {
46 unsigned int size;
47 unsigned int order;
48 unsigned int fill;
49 struct hlist_head *table;
50 int use_vmalloc;
51};
52
53
54extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
55extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
56extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
57 unsigned long seed, int bits, int shift,
58 unsigned long add);
59extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
60
61extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
62extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
63extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
64extern void drm_ht_remove(struct drm_open_hash *ht);
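/*
 * Usage sketch (assumed; struct foo is invented): embed a
 * drm_hash_item in the tracked structure, insert it under a key, and
 * recover the container with drm_hash_entry():
 *
 *	struct foo { struct drm_hash_item hash; };
 *	struct drm_open_hash ht;
 *	struct drm_hash_item *item;
 *	struct foo *f = ...;
 *
 *	drm_ht_create(&ht, 8);
 *	f->hash.key = key;
 *	drm_ht_insert_item(&ht, &f->hash);
 *	if (!drm_ht_find_item(&ht, key, &item))
 *		f = drm_hash_entry(item, struct foo, hash);
 */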
65
66
67#endif
diff --git a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h
deleted file mode 100644
index 63e425b5ea82..000000000000
--- a/drivers/char/drm/drm_memory.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/**
2 * \file drm_memory.h
3 * Memory management wrappers for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/highmem.h>
37#include <linux/vmalloc.h>
38#include "drmP.h"
39
40/**
 41 * Cut-down version of drm_memory_debug.h, which used to be called
42 * drm_memory.h.
43 */
44
45#if __OS_HAS_AGP
46
47#include <linux/vmalloc.h>
48
49#ifdef HAVE_PAGE_AGP
50#include <asm/agp.h>
51#else
52# ifdef __powerpc__
53# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
54# else
55# define PAGE_AGP PAGE_KERNEL
56# endif
57#endif
58
59#else /* __OS_HAS_AGP */
60
61#endif
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
deleted file mode 100644
index 6463271deea8..000000000000
--- a/drivers/char/drm/drm_memory_debug.h
+++ /dev/null
@@ -1,309 +0,0 @@
1/**
2 * \file drm_memory_debug.h
3 * Memory management wrappers for DRM.
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
31 * OTHER DEALINGS IN THE SOFTWARE.
32 */
33
34#include "drmP.h"
35
36typedef struct drm_mem_stats {
37 const char *name;
38 int succeed_count;
39 int free_count;
40 int fail_count;
41 unsigned long bytes_allocated;
42 unsigned long bytes_freed;
43} drm_mem_stats_t;
44
45static DEFINE_SPINLOCK(drm_mem_lock);
46static unsigned long drm_ram_available = 0; /* In pages */
47static unsigned long drm_ram_used = 0;
48static drm_mem_stats_t drm_mem_stats[] =
49{
50 [DRM_MEM_DMA] = {"dmabufs"},
51 [DRM_MEM_SAREA] = {"sareas"},
52 [DRM_MEM_DRIVER] = {"driver"},
53 [DRM_MEM_MAGIC] = {"magic"},
54 [DRM_MEM_IOCTLS] = {"ioctltab"},
55 [DRM_MEM_MAPS] = {"maplist"},
56 [DRM_MEM_VMAS] = {"vmalist"},
57 [DRM_MEM_BUFS] = {"buflist"},
58 [DRM_MEM_SEGS] = {"seglist"},
59 [DRM_MEM_PAGES] = {"pagelist"},
60 [DRM_MEM_FILES] = {"files"},
61 [DRM_MEM_QUEUES] = {"queues"},
62 [DRM_MEM_CMDS] = {"commands"},
63 [DRM_MEM_MAPPINGS] = {"mappings"},
64 [DRM_MEM_BUFLISTS] = {"buflists"},
65 [DRM_MEM_AGPLISTS] = {"agplist"},
66 [DRM_MEM_SGLISTS] = {"sglist"},
67 [DRM_MEM_TOTALAGP] = {"totalagp"},
68 [DRM_MEM_BOUNDAGP] = {"boundagp"},
69 [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
70 [DRM_MEM_CTXLIST] = {"ctxlist"},
71 [DRM_MEM_STUB] = {"stub"},
72 {NULL, 0,} /* Last entry must be null */
73};
74
75void drm_mem_init (void) {
76 drm_mem_stats_t *mem;
77 struct sysinfo si;
78
79 for (mem = drm_mem_stats; mem->name; ++mem) {
80 mem->succeed_count = 0;
81 mem->free_count = 0;
82 mem->fail_count = 0;
83 mem->bytes_allocated = 0;
84 mem->bytes_freed = 0;
85 }
86
87 si_meminfo(&si);
88 drm_ram_available = si.totalram;
89 drm_ram_used = 0;
90}
91
92/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
93
94static int drm__mem_info (char *buf, char **start, off_t offset,
95 int request, int *eof, void *data) {
96 drm_mem_stats_t *pt;
97 int len = 0;
98
99 if (offset > DRM_PROC_LIMIT) {
100 *eof = 1;
101 return 0;
102 }
103
104 *eof = 0;
105 *start = &buf[offset];
106
107 DRM_PROC_PRINT(" total counts "
108 " | outstanding \n");
109 DRM_PROC_PRINT("type alloc freed fail bytes freed"
110 " | allocs bytes\n\n");
111 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
112 "system", 0, 0, 0,
113 drm_ram_available << (PAGE_SHIFT - 10));
114 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
115 "locked", 0, 0, 0, drm_ram_used >> 10);
116 DRM_PROC_PRINT("\n");
117 for (pt = drm_mem_stats; pt->name; pt++) {
118 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
119 pt->name,
120 pt->succeed_count,
121 pt->free_count,
122 pt->fail_count,
123 pt->bytes_allocated,
124 pt->bytes_freed,
125 pt->succeed_count - pt->free_count,
126 (long)pt->bytes_allocated
127 - (long)pt->bytes_freed);
128 }
129
130 if (len > request + offset)
131 return request;
132 *eof = 1;
133 return len - offset;
134}
135
136int drm_mem_info (char *buf, char **start, off_t offset,
137 int len, int *eof, void *data) {
138 int ret;
139
140 spin_lock(&drm_mem_lock);
141 ret = drm__mem_info (buf, start, offset, len, eof, data);
142 spin_unlock(&drm_mem_lock);
143 return ret;
144}
145
146void *drm_alloc (size_t size, int area) {
147 void *pt;
148
149 if (!size) {
150 DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
151 return NULL;
152 }
153
154 if (!(pt = kmalloc(size, GFP_KERNEL))) {
155 spin_lock(&drm_mem_lock);
156 ++drm_mem_stats[area].fail_count;
157 spin_unlock(&drm_mem_lock);
158 return NULL;
159 }
160 spin_lock(&drm_mem_lock);
161 ++drm_mem_stats[area].succeed_count;
162 drm_mem_stats[area].bytes_allocated += size;
163 spin_unlock(&drm_mem_lock);
164 return pt;
165}
166
167void *drm_calloc (size_t nmemb, size_t size, int area) {
168 void *addr;
169
170 addr = drm_alloc (nmemb * size, area);
171 if (addr != NULL)
172 memset((void *)addr, 0, size * nmemb);
173
174 return addr;
175}
176
177void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
178 void *pt;
179
180 if (!(pt = drm_alloc (size, area)))
181 return NULL;
182 if (oldpt && oldsize) {
183 memcpy(pt, oldpt, oldsize);
184 drm_free (oldpt, oldsize, area);
185 }
186 return pt;
187}
188
189void drm_free (void *pt, size_t size, int area) {
190 int alloc_count;
191 int free_count;
192
193 if (!pt)
194 DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
195 else
196 kfree(pt);
197 spin_lock(&drm_mem_lock);
198 drm_mem_stats[area].bytes_freed += size;
199 free_count = ++drm_mem_stats[area].free_count;
200 alloc_count = drm_mem_stats[area].succeed_count;
201 spin_unlock(&drm_mem_lock);
202 if (free_count > alloc_count) {
203 DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
204 free_count, alloc_count);
205 }
206}
207
208#if __OS_HAS_AGP
209
210DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) {
211 DRM_AGP_MEM *handle;
212
213 if (!pages) {
214 DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
215 return NULL;
216 }
217
 218	if ((handle = drm_agp_allocate_memory (dev->agp->bridge, pages, type))) {
219 spin_lock(&drm_mem_lock);
220 ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
221 drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
222 += pages << PAGE_SHIFT;
223 spin_unlock(&drm_mem_lock);
224 return handle;
225 }
226 spin_lock(&drm_mem_lock);
227 ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
228 spin_unlock(&drm_mem_lock);
229 return NULL;
230}
231
232int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
233 int alloc_count;
234 int free_count;
235 int retval = -EINVAL;
236
237 if (!handle) {
238 DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
239 "Attempt to free NULL AGP handle\n");
240 return retval;
241 }
242
243 if (drm_agp_free_memory (handle)) {
244 spin_lock(&drm_mem_lock);
245 free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
246 alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
247 drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
248 += pages << PAGE_SHIFT;
249 spin_unlock(&drm_mem_lock);
250 if (free_count > alloc_count) {
251 DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
252 "Excess frees: %d frees, %d allocs\n",
253 free_count, alloc_count);
254 }
255 return 0;
256 }
257 return retval;
258}
259
260int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
261 int retcode = -EINVAL;
262
263 if (!handle) {
264 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
265 "Attempt to bind NULL AGP handle\n");
266 return retcode;
267 }
268
269 if (!(retcode = drm_agp_bind_memory (handle, start))) {
270 spin_lock(&drm_mem_lock);
271 ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
272 drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
273 += handle->page_count << PAGE_SHIFT;
274 spin_unlock(&drm_mem_lock);
275 return retcode;
276 }
277 spin_lock(&drm_mem_lock);
278 ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
279 spin_unlock(&drm_mem_lock);
280 return retcode;
281}
282
283int drm_unbind_agp (DRM_AGP_MEM * handle) {
284 int alloc_count;
285 int free_count;
286 int retcode = -EINVAL;
287
288 if (!handle) {
289 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
290 "Attempt to unbind NULL AGP handle\n");
291 return retcode;
292 }
293
294 if ((retcode = drm_agp_unbind_memory (handle)))
295 return retcode;
296 spin_lock(&drm_mem_lock);
297 free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
298 alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
299 drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
300 += handle->page_count << PAGE_SHIFT;
301 spin_unlock(&drm_mem_lock);
302 if (free_count > alloc_count) {
303 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
304 "Excess frees: %d frees, %d allocs\n",
305 free_count, alloc_count);
306 }
307 return retcode;
308}
309#endif
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
deleted file mode 100644
index 8dbd2572b7c3..000000000000
--- a/drivers/char/drm/drm_os_linux.h
+++ /dev/null
@@ -1,108 +0,0 @@
1/**
2 * \file drm_os_linux.h
3 * OS abstraction macros.
4 */
5
6#include <linux/interrupt.h> /* For task queue support */
7#include <linux/delay.h>
8
9/** Current process ID */
10#define DRM_CURRENTPID task_pid_nr(current)
11#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
12#define DRM_UDELAY(d) udelay(d)
13/** Read a byte from a MMIO region */
14#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
15/** Read a word from a MMIO region */
16#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
17/** Read a dword from a MMIO region */
18#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
19/** Write a byte into a MMIO region */
20#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
21/** Write a word into a MMIO region */
22#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
23/** Write a dword into a MMIO region */
24#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
25/** Read memory barrier */
26#define DRM_READMEMORYBARRIER() rmb()
27/** Write memory barrier */
28#define DRM_WRITEMEMORYBARRIER() wmb()
29/** Read/write memory barrier */
30#define DRM_MEMORYBARRIER() mb()
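/*
 * Illustrative MMIO access (register offsets invented): drivers wrap
 * their register file in a map and go through these accessors so the
 * byte order and __iomem handling stay in one place:
 *
 *	u32 status = DRM_READ32(dev_priv->mmio, FOO_STATUS);
 *	DRM_WRITE32(dev_priv->mmio, FOO_IRQ_ACK, status);
 */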
31
32/** IRQ handler arguments and return type and values */
33#define DRM_IRQ_ARGS int irq, void *arg
34
35/** AGP types */
36#if __OS_HAS_AGP
37#define DRM_AGP_MEM struct agp_memory
38#define DRM_AGP_KERN struct agp_kern_info
39#else
40/* Define some dummy types for kernels without AGP support */
41struct no_agp_kern {
42 unsigned long aper_base;
43 unsigned long aper_size;
44};
45#define DRM_AGP_MEM int
46#define DRM_AGP_KERN struct no_agp_kern
47#endif
48
49#if !(__OS_HAS_MTRR)
50static __inline__ int mtrr_add(unsigned long base, unsigned long size,
51 unsigned int type, char increment)
52{
53 return -ENODEV;
54}
55
56static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
57{
58 return -ENODEV;
59}
60
61#define MTRR_TYPE_WRCOMB 1
62
63#endif
64
65/** Other copying of data to kernel space */
66#define DRM_COPY_FROM_USER(arg1, arg2, arg3) \
67 copy_from_user(arg1, arg2, arg3)
68/** Other copying of data from kernel space */
69#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
70 copy_to_user(arg1, arg2, arg3)
 71/* Macros for copying from user space, checking readability only once */
72#define DRM_VERIFYAREA_READ( uaddr, size ) \
73 (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
74#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
75 __copy_from_user(arg1, arg2, arg3)
76#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
77 __copy_to_user(arg1, arg2, arg3)
78#define DRM_GET_USER_UNCHECKED(val, uaddr) \
79 __get_user(val, uaddr)
80
81#define DRM_HZ HZ
82
83#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
84do { \
85 DECLARE_WAITQUEUE(entry, current); \
86 unsigned long end = jiffies + (timeout); \
87 add_wait_queue(&(queue), &entry); \
88 \
89 for (;;) { \
90 __set_current_state(TASK_INTERRUPTIBLE); \
91 if (condition) \
92 break; \
93 if (time_after_eq(jiffies, end)) { \
94 ret = -EBUSY; \
95 break; \
96 } \
97 schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
98 if (signal_pending(current)) { \
99 ret = -EINTR; \
100 break; \
101 } \
102 } \
103 __set_current_state(TASK_RUNNING); \
104 remove_wait_queue(&(queue), &entry); \
105} while (0)
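/*
 * Example (illustrative): block for up to three seconds until the
 * vblank counter reaches a target, in the interruptible style drivers
 * use from their vblank_wait hooks:
 *
 *	int ret = 0;
 *	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
 *		    atomic_read(&dev->vbl_received) >= target);
 *	return ret;
 */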
106
107#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
108#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
deleted file mode 100644
index 135bd19499fc..000000000000
--- a/drivers/char/drm/drm_pciids.h
+++ /dev/null
@@ -1,415 +0,0 @@
1/*
2   This file is auto-generated from the drm_pciids.txt in the DRM CVS.
3   Please contact dri-devel@lists.sf.net to add new cards to this list.
4*/
5#define radeon_PCI_IDS \
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
10 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
11 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
12 {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
13 {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
14 {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
15 {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
16 {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
17 {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
18 {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
19 {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
20 {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
21 {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
22 {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
23 {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
24 {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
25 {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
26 {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
27 {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
28 {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
29 {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
30 {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
31 {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
32 {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
33 {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
34 {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
35 {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
36 {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
37 {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
38 {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
39 {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
40 {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
41 {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
42 {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
43 {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
44 {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
45 {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
46 {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
47 {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
48 {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
49 {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
50 {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
51 {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
52 {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
53 {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
54 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
55 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
56 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
57 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
58 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
59 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
60 {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
61 {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
62 {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
63 {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
64 {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
65 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
66 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
67 {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
68 {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
69 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
70 {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
71 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
72 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
73 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
74 {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
75 {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
76 {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
77 {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
78 {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
79 {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
80 {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
81 {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
82 {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
83 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
86 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
87 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
88 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
89 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
90 {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
91 {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
92 {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
93 {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
94 {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
95 {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
96 {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
97 {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
98 {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
99 {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
100 {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
101 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
102 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
103 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
104 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
105 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
106 {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
107 {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
108 {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
109 {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
110 {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
111 {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
112 {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
113 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
114 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
115 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
116 {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
117 {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
118 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
119 {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
120 {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
121 {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
122 {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
123 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
124 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
125 {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
126 {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
127 {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
128 {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
129 {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
130 {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
131 {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
132 {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
133 {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
134 {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
135 {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
136 {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
137 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
138 {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
139 {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
140 {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
141 {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
142 {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
143 {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
144 {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
145 {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
146 {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
147 {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
148 {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
149 {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
150 {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
151 {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
152 {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
153 {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
154 {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
155 {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
156 {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
157 {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
158 {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
159 {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
160 {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
161 {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
162 {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
163 {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
164 {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
165 {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
166 {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
167 {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
168 {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
169 {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
170 {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
171 {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
172 {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
173 {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
174 {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
176 {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
177 {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
178 {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
179 {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
180 {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
181 {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
182 {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
183 {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
184 {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
185 {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
186 {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
187 {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
188 {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
189 {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
190 {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
191 {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
192 {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
193 {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
194 {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
195 {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
196 {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
197 {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
198 {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
199 {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
200 {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
201 {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
202 {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
203 {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
204 {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
205 {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
206 {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
207 {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
208 {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
209 {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
210 {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
211 {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
212 {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
213 {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
214 {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
215 {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
216 {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
217 {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
218 {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
219 {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
220 {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
221 {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
222 {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
223 {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
224 {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
225 {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
226 {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
227 {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
228 {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
229 {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
230 {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
231 {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
232 {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
233 {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
234 {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
235 {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
236 {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
237 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
238 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
239 {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
240 {0, 0, 0}
241
242#define r128_PCI_IDS \
243 {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
244 {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
245 {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
246 {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
247 {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
248 {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
249 {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
250 {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
251 {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
252 {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
253 {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
254 {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
255 {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
256 {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
257 {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
258 {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
259 {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
260 {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
261 {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
262 {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
263 {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
264 {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
265 {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
266 {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
267 {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
268 {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
269 {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
270 {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
271 {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
272 {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
273 {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
274 {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
275 {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
276 {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
277 {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
278 {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
279 {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
280 {0, 0, 0}
281
282#define mga_PCI_IDS \
283 {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
284 {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
285 {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
286 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
287 {0, 0, 0}
288
289#define mach64_PCI_IDS \
290 {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
291 {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
292 {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
293 {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
294 {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
295 {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
296 {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
297 {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
298 {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
299 {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
300 {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
301 {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
302 {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
303 {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
304 {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
305 {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
306 {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
307 {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
308 {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
309 {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
310 {0, 0, 0}
311
312#define sisdrv_PCI_IDS \
313 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
314 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
315 {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
316 {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
317 {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
318 {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
319 {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
320 {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
321 {0, 0, 0}
322
323#define tdfx_PCI_IDS \
324 {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
325 {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
326 {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
327 {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
328 {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
329 {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
330 {0, 0, 0}
331
332#define viadrv_PCI_IDS \
333 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
334 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
335 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
336 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
337 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
338 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
339 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
340 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
341 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
342 {0, 0, 0}
343
344#define i810_PCI_IDS \
345 {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
346 {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
347 {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
348 {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
349 {0, 0, 0}
350
351#define i830_PCI_IDS \
352 {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
353 {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
354 {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
355 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
356 {0, 0, 0}
357
358#define gamma_PCI_IDS \
359 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
360 {0, 0, 0}
361
362#define savage_PCI_IDS \
363 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
364 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
365 {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
366 {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
367 {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
368 {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
369 {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
370 {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
371 {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
372 {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
373 {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
374 {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
375 {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
376 {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
377 {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
378 {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
379 {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
380 {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
381 {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
382 {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
383 {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
384 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
385 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
386 {0, 0, 0}
387
388#define ffb_PCI_IDS \
389 {0, 0, 0}
390
391#define i915_PCI_IDS \
392 {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
393 {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
394 {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
395 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
396 {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
397 {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
398 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
399 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
400 {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
401 {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
402 {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
403 {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
404 {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
405 {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
406 {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
407 {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
408 {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
409 {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
410 {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
411 {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
412 {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
413 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
414 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
415 {0, 0, 0}
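
Each *_PCI_IDS macro in this file expands to initializers for a struct pci_device_id array — vendor, device, subvendor, subdevice, class, class_mask, driver_data — terminated by the all-zero sentinel entry, with the CHIP_*/feature-flag word carried in driver_data. A hedged sketch of how a driver of this era consumed such a list (pciidlist is just an illustrative name):

#include "drm_pciids.h"

/* The macro body supplies the braces and commas for each entry,
 * including the {0, 0, 0} terminator. */
static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
};

At probe time the driver reads id->driver_data back out of the matched entry to recover the chip family and the RADEON_IS_* / RADEON_NEW_MEMMAP flags.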
diff --git a/drivers/char/drm/drm_sarea.h b/drivers/char/drm/drm_sarea.h
deleted file mode 100644
index 480037331e4e..000000000000
--- a/drivers/char/drm/drm_sarea.h
+++ /dev/null
@@ -1,84 +0,0 @@
1/**
2 * \file drm_sarea.h
3 * \brief SAREA definitions
4 *
5 * \author Michel Dänzer <michel@daenzer.net>
6 */
7
8/*
9 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
10 * All Rights Reserved.
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a
13 * copy of this software and associated documentation files (the "Software"),
14 * to deal in the Software without restriction, including without limitation
15 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
16 * and/or sell copies of the Software, and to permit persons to whom the
17 * Software is furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice (including the next
20 * paragraph) shall be included in all copies or substantial portions of the
21 * Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
27 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
28 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
29 * OTHER DEALINGS IN THE SOFTWARE.
30 */
31
32#ifndef _DRM_SAREA_H_
33#define _DRM_SAREA_H_
34
35#include "drm.h"
36
37/* The SAREA needs to be at least a page */
38#if defined(__alpha__)
39#define SAREA_MAX 0x2000
40#elif defined(__ia64__)
41#define SAREA_MAX 0x10000 /* 64kB */
42#else
43/* Intel 830M driver needs at least 8k SAREA */
44#define SAREA_MAX 0x2000
45#endif
46
47/** Maximum number of drawables in the SAREA */
48#define SAREA_MAX_DRAWABLES 256
49
50#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
51
52/** SAREA drawable */
53struct drm_sarea_drawable {
54 unsigned int stamp;
55 unsigned int flags;
56};
57
58/** SAREA frame */
59struct drm_sarea_frame {
60 unsigned int x;
61 unsigned int y;
62 unsigned int width;
63 unsigned int height;
64 unsigned int fullscreen;
65};
66
67/** SAREA */
68struct drm_sarea {
69 /** first thing is always the DRM locking structure */
70 struct drm_hw_lock lock;
71 /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
72 struct drm_hw_lock drawable_lock;
73 struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
74 struct drm_sarea_frame frame; /**< frame */
75 drm_context_t dummy_context;
76};
77
78#ifndef __KERNEL__
79typedef struct drm_sarea_drawable drm_sarea_drawable_t;
80typedef struct drm_sarea_frame drm_sarea_frame_t;
81typedef struct drm_sarea drm_sarea_t;
82#endif
83
84#endif /* _DRM_SAREA_H_ */
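
SAREA_DRAWABLE_CLAIMED_ENTRY is the top bit (0x80000000) used to mark a drawableTable slot as taken. A hedged sketch of testing and setting it — that the bit is carried in the stamp field is an assumption here, and real code would only touch the table while holding drawable_lock:

/* Sketch only: `sarea` points at the mapped shared area; the choice of
 * the stamp field for the claimed bit is an assumption. */
static int drawable_is_claimed(const struct drm_sarea *sarea, int i)
{
	return (sarea->drawableTable[i].stamp &
		SAREA_DRAWABLE_CLAIMED_ENTRY) != 0;
}

static void drawable_claim(struct drm_sarea *sarea, int i)
{
	sarea->drawableTable[i].stamp |= SAREA_DRAWABLE_CLAIMED_ENTRY;
}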
diff --git a/drivers/char/drm/drm_sman.h b/drivers/char/drm/drm_sman.h
deleted file mode 100644
index 08ecf83ad5d4..000000000000
--- a/drivers/char/drm/drm_sman.h
+++ /dev/null
@@ -1,176 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Simple memory MANager interface that keeps track of allocated regions on a
30 * per-"owner" basis. All regions associated with an "owner" can be released
31 * with a single call, typically when the "owner" ceases to exist. The owner is
32 * any "unsigned long" identifier, typically a pointer to a file-private
33 * struct or a context identifier.
34 *
35 * Authors:
36 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37 */
38
39#ifndef DRM_SMAN_H
40#define DRM_SMAN_H
41
42#include "drmP.h"
43#include "drm_hashtab.h"
44
45/*
46 * A class that is an abstraction of a simple memory allocator.
47 * The sman implementation provides a default such allocator
48 * using the drm_mm.c implementation. But the user can replace it.
49 * See the SiS implementation, which may use the SiS FB kernel module
50 * for memory management.
51 */
52
53struct drm_sman_mm {
54 /* private info. If allocated, needs to be destroyed by the destroy
55 function */
56 void *private;
57
58 /* Allocate a memory block with given size and alignment.
59 Return an opaque reference to the memory block */
60
61 void *(*allocate) (void *private, unsigned long size,
62 unsigned alignment);
63
64 /* Free a memory block. "ref" is the opaque reference that we got from
65 the "alloc" function */
66
67 void (*free) (void *private, void *ref);
68
69 /* Free all resources associated with this allocator */
70
71 void (*destroy) (void *private);
72
73 /* Return a memory offset from the opaque reference returned from the
74 "alloc" function */
75
76 unsigned long (*offset) (void *private, void *ref);
77};
78
79struct drm_memblock_item {
80 struct list_head owner_list;
81 struct drm_hash_item user_hash;
82 void *mm_info;
83 struct drm_sman_mm *mm;
84 struct drm_sman *sman;
85};
86
87struct drm_sman {
88 struct drm_sman_mm *mm;
89 int num_managers;
90 struct drm_open_hash owner_hash_tab;
91 struct drm_open_hash user_hash_tab;
92 struct list_head owner_items;
93};
94
95/*
96 * Take down a memory manager. This function should only be called after a
97 * successful init and after a call to drm_sman_cleanup.
98 */
99
100extern void drm_sman_takedown(struct drm_sman * sman);
101
102/*
103 * Allocate structures for a manager.
104 * num_managers is the number of memory pools to manage (VRAM, AGP, ...).
105 * user_order is the log2 of the number of buckets in the user hash table.
106 * Set this to approximately log2 of the max number of memory regions
107 * that will be allocated for _all_ pools together.
108 * owner_order is the log2 of the number of buckets in the owner hash table.
109 * Set this to approximately log2 of
110 * the number of client file connections that will
111 * be using the manager.
112 *
113 */
114
115extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
116 unsigned int user_order, unsigned int owner_order);
117
118/*
119 * Initialize a drm_mm.c allocator. Should be called only once for each
120 * manager unless a customized allocator is used.
121 */
122
123extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
124 unsigned long start, unsigned long size);
125
126/*
127 * Initialize a customized allocator for one of the managers.
128 * (See the SiS module). The object pointed to by "allocator" is copied,
129 * so it can be destroyed after this call.
130 */
131
132extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
133 struct drm_sman_mm * allocator);
134
135/*
136 * Allocate a memory block. Alignment is not implemented yet.
137 */
138
139extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
140 unsigned int manager,
141 unsigned long size,
142 unsigned alignment,
143 unsigned long owner);
144/*
145 * Free a memory block identified by its user hash key.
146 */
147
148extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
149
150/*
151 * Returns 1 iff there are no stale memory blocks associated with this owner.
152 * Typically called to determine if we need to idle the hardware and call
153 * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
154 * resources associated with owner.
155 */
156
157extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
158
159/*
160 * Frees all stale memory blocks associated with this owner. Note that this
161 * requires that the hardware is finished with all blocks, so the graphics engine
162 * should be idled before this call is made. This function also frees
163 * any resources associated with "owner" and should be called when owner
164 * is not going to be referenced anymore.
165 */
166
167extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
168
169/*
170 * Frees all stale memory blocks associated with the memory manager.
171 * See idling above.
172 */
173
174extern void drm_sman_cleanup(struct drm_sman * sman);
175
176#endif
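
Taken together, the comments above fix a lifecycle: drm_sman_init() sizes the hash tables, drm_sman_set_range() (or drm_sman_set_manager()) attaches an allocator to each pool, allocations are charged to an owner, and teardown is cleanup-then-takedown. A hedged sketch of that flow for a single pool; the pool count, hash orders, sizes and owner value are all illustrative:

/* Hedged sketch of the drm_sman lifecycle; not taken from any driver. */
static int example_sman_session(void *filp_private)
{
	struct drm_sman sman;
	struct drm_memblock_item *item;
	unsigned long owner = (unsigned long)filp_private; /* any unique id */
	int ret;

	ret = drm_sman_init(&sman, 1, 12, 8);	/* 1 pool, 2^12/2^8 buckets */
	if (ret)
		return ret;

	/* Pool 0: 64 MiB starting at offset 0, default drm_mm allocator. */
	ret = drm_sman_set_range(&sman, 0, 0, 64 * 1024 * 1024);
	if (!ret) {
		item = drm_sman_alloc(&sman, 0, 4096, 0, owner);
		if (item)	/* free one block by its user hash key */
			drm_sman_free_key(&sman, item->user_hash.key);
		drm_sman_owner_cleanup(&sman, owner); /* hardware must be idle */
	}
	drm_sman_cleanup(&sman);	/* required before takedown */
	drm_sman_takedown(&sman);
	return ret;
}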
diff --git a/drivers/char/drm/i810_drm.h b/drivers/char/drm/i810_drm.h
deleted file mode 100644
index 7a10bb6f2c0f..000000000000
--- a/drivers/char/drm/i810_drm.h
+++ /dev/null
@@ -1,281 +0,0 @@
1#ifndef _I810_DRM_H_
2#define _I810_DRM_H_
3
4/* WARNING: These defines must be the same as what the Xserver uses.
5 * If you change them, you must change the defines in the Xserver.
6 */
7
8#ifndef _I810_DEFINES_
9#define _I810_DEFINES_
10
11#define I810_DMA_BUF_ORDER 12
12#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
13#define I810_DMA_BUF_NR 256
14#define I810_NR_SAREA_CLIPRECTS 8
15
16/* Each region is a minimum of 64k, and there are at most 64 of them.
17 */
18#define I810_NR_TEX_REGIONS 64
19#define I810_LOG_MIN_TEX_REGION_SIZE 16
20#endif
21
22#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
23#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
24#define I810_UPLOAD_CTX 0x4
25#define I810_UPLOAD_BUFFERS 0x8
26#define I810_UPLOAD_TEX0 0x10
27#define I810_UPLOAD_TEX1 0x20
28#define I810_UPLOAD_CLIPRECTS 0x40
29
30/* Indices into buf.Setup where various bits of state are mirrored per
31 * context and per buffer. These can be fired at the card as a unit,
32 * or in a piecewise fashion as required.
33 */
34
35/* Destbuffer state
36 * - backbuffer linear offset and pitch -- invariant in the current DRI
37 * - zbuffer linear offset and pitch -- also invariant
38 * - drawing origin in back and depth buffers.
39 *
40 * Keep the depth/back buffer state here to accommodate private buffers
41 * in the future.
42 */
43#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
44#define I810_DESTREG_DI1 1
45#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
46#define I810_DESTREG_DV1 3
47#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
48#define I810_DESTREG_DR1 5
49#define I810_DESTREG_DR2 6
50#define I810_DESTREG_DR3 7
51#define I810_DESTREG_DR4 8
52#define I810_DEST_SETUP_SIZE 10
53
54/* Context state
55 */
56#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
57#define I810_CTXREG_CF1 1
58#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
59#define I810_CTXREG_ST1 3
60#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
61#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
62#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
63#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
64#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
65#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
66#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
67#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
68#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
69#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
70#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
71#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
72#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
73#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invarient! */
74#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
75#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
76#define I810_CTX_SETUP_SIZE 20
77
78/* Texture state (per tex unit)
79 */
80#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
81#define I810_TEXREG_MI1 1
82#define I810_TEXREG_MI2 2
83#define I810_TEXREG_MI3 3
84#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
85#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
86#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
87#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
88#define I810_TEX_SETUP_SIZE 8
89
90/* Flags for clear ioctl
91 */
92#define I810_FRONT 0x1
93#define I810_BACK 0x2
94#define I810_DEPTH 0x4
95
96typedef enum _drm_i810_init_func {
97 I810_INIT_DMA = 0x01,
98 I810_CLEANUP_DMA = 0x02,
99 I810_INIT_DMA_1_4 = 0x03
100} drm_i810_init_func_t;
101
102/* This is the init structure after v1.2 */
103typedef struct _drm_i810_init {
104 drm_i810_init_func_t func;
105 unsigned int mmio_offset;
106 unsigned int buffers_offset;
107 int sarea_priv_offset;
108 unsigned int ring_start;
109 unsigned int ring_end;
110 unsigned int ring_size;
111 unsigned int front_offset;
112 unsigned int back_offset;
113 unsigned int depth_offset;
114 unsigned int overlay_offset;
115 unsigned int overlay_physical;
116 unsigned int w;
117 unsigned int h;
118 unsigned int pitch;
119 unsigned int pitch_bits;
120} drm_i810_init_t;
121
122/* This is the init structure prior to v1.2 */
123typedef struct _drm_i810_pre12_init {
124 drm_i810_init_func_t func;
125 unsigned int mmio_offset;
126 unsigned int buffers_offset;
127 int sarea_priv_offset;
128 unsigned int ring_start;
129 unsigned int ring_end;
130 unsigned int ring_size;
131 unsigned int front_offset;
132 unsigned int back_offset;
133 unsigned int depth_offset;
134 unsigned int w;
135 unsigned int h;
136 unsigned int pitch;
137 unsigned int pitch_bits;
138} drm_i810_pre12_init_t;
139
140/* Warning: If you change the SAREA structure you must change the Xserver
141 * structure as well */
142
143typedef struct _drm_i810_tex_region {
144 unsigned char next, prev; /* indices to form a circular LRU */
145 unsigned char in_use; /* owned by a client, or free? */
146 int age; /* tracked by clients to update local LRU's */
147} drm_i810_tex_region_t;
148
149typedef struct _drm_i810_sarea {
150 unsigned int ContextState[I810_CTX_SETUP_SIZE];
151 unsigned int BufferState[I810_DEST_SETUP_SIZE];
152 unsigned int TexState[2][I810_TEX_SETUP_SIZE];
153 unsigned int dirty;
154
155 unsigned int nbox;
156 struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
157
158 /* Maintain an LRU of contiguous regions of texture space. If
159 * you think you own a region of texture memory, and it has an
160 * age different to the one you set, then you are mistaken and
161 * it has been stolen by another client. If global texAge
162 * hasn't changed, there is no need to walk the list.
163 *
164 * These regions can be used as a proxy for the fine-grained
165 * texture information of other clients - by maintaining them
166 * in the same lru which is used to age their own textures,
167 * clients have an approximate lru for the whole of global
168 * texture space, and can make informed decisions as to which
169 * areas to kick out. There is no need to choose whether to
170 * kick out your own texture or someone else's - simply eject
171 * them all in LRU order.
172 */
173
174 drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
175 /* Last element is the sentinel */
176 int texAge; /* last time texture was uploaded */
177 int last_enqueue; /* last time a buffer was enqueued */
178 int last_dispatch; /* age of the most recently dispatched buffer */
179 int last_quiescent; /* */
180 int ctxOwner; /* last context to upload state */
181
182 int vertex_prim;
183
184 int pf_enabled; /* is pageflipping allowed? */
185 int pf_active;
186 int pf_current_page; /* which buffer is being displayed? */
187} drm_i810_sarea_t;
188
189/* WARNING: If you change any of these defines, make sure to change the
190 * corresponding defines in the Xserver header.
191 */
192
193/* i810 specific ioctls
194 * The device specific ioctl range is 0x40 to 0x79.
195 */
196#define DRM_I810_INIT 0x00
197#define DRM_I810_VERTEX 0x01
198#define DRM_I810_CLEAR 0x02
199#define DRM_I810_FLUSH 0x03
200#define DRM_I810_GETAGE 0x04
201#define DRM_I810_GETBUF 0x05
202#define DRM_I810_SWAP 0x06
203#define DRM_I810_COPY 0x07
204#define DRM_I810_DOCOPY 0x08
205#define DRM_I810_OV0INFO 0x09
206#define DRM_I810_FSTATUS 0x0a
207#define DRM_I810_OV0FLIP 0x0b
208#define DRM_I810_MC 0x0c
209#define DRM_I810_RSTATUS 0x0d
210#define DRM_I810_FLIP 0x0e
211
212#define DRM_IOCTL_I810_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t)
213#define DRM_IOCTL_I810_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t)
214#define DRM_IOCTL_I810_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t)
215#define DRM_IOCTL_I810_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_I810_FLUSH)
216#define DRM_IOCTL_I810_GETAGE DRM_IO( DRM_COMMAND_BASE + DRM_I810_GETAGE)
217#define DRM_IOCTL_I810_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t)
218#define DRM_IOCTL_I810_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_I810_SWAP)
219#define DRM_IOCTL_I810_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t)
220#define DRM_IOCTL_I810_DOCOPY DRM_IO( DRM_COMMAND_BASE + DRM_I810_DOCOPY)
221#define DRM_IOCTL_I810_OV0INFO DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t)
222#define DRM_IOCTL_I810_FSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS)
223#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP)
224#define DRM_IOCTL_I810_MC DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t)
225#define DRM_IOCTL_I810_RSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS)
226#define DRM_IOCTL_I810_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP)
227
228typedef struct _drm_i810_clear {
229 int clear_color;
230 int clear_depth;
231 int flags;
232} drm_i810_clear_t;
233
234/* These may be placeholders if we have more cliprects than
235 * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
236 * false, indicating that the buffer will be dispatched again with a
237 * new set of cliprects.
238 */
239typedef struct _drm_i810_vertex {
240 int idx; /* buffer index */
241 int used; /* nr bytes in use */
242 int discard; /* client is finished with the buffer? */
243} drm_i810_vertex_t;
244
245typedef struct _drm_i810_copy_t {
246 int idx; /* buffer index */
247 int used; /* nr bytes in use */
248 void *address; /* Address to copy from */
249} drm_i810_copy_t;
250
251#define PR_TRIANGLES (0x0<<18)
252#define PR_TRISTRIP_0 (0x1<<18)
253#define PR_TRISTRIP_1 (0x2<<18)
254#define PR_TRIFAN (0x3<<18)
255#define PR_POLYGON (0x4<<18)
256#define PR_LINES (0x5<<18)
257#define PR_LINESTRIP (0x6<<18)
258#define PR_RECTS (0x7<<18)
259#define PR_MASK (0x7<<18)
260
261typedef struct drm_i810_dma {
262 void *virtual;
263 int request_idx;
264 int request_size;
265 int granted;
266} drm_i810_dma_t;
267
268typedef struct _drm_i810_overlay_t {
269 unsigned int offset; /* Address of the Overlay Regs */
270 unsigned int physical;
271} drm_i810_overlay_t;
272
273typedef struct _drm_i810_mc {
274 int idx; /* buffer index */
275 int used; /* nr bytes in use */
276 int num_blocks; /* number of GFXBlocks */
277 int *length; /* List of lengths for GFXBlocks (FUTURE) */
278 unsigned int last_render; /* Last Render Request */
279} drm_i810_mc_t;
280
281#endif /* _I810_DRM_H_ */
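
The texList comments above describe a circular LRU threaded through the next/prev byte indices, with the extra element at index I810_NR_TEX_REGIONS acting as the list head. A hedged sketch of walking it from most- to least-recently-used; that the sentinel slot serves as the head is an assumption based on the "last element is the sentinel" note:

#define I810_TEX_LRU_SENTINEL I810_NR_TEX_REGIONS	/* assumed head slot */

static void walk_tex_lru(const drm_i810_sarea_t *sarea)
{
	unsigned char i;

	/* Start at the entry the sentinel points to and stop once the
	 * circle closes back on the sentinel. */
	for (i = sarea->texList[I810_TEX_LRU_SENTINEL].next;
	     i != I810_TEX_LRU_SENTINEL;
	     i = sarea->texList[i].next) {
		/* sarea->texList[i].in_use and .age are valid here; a
		 * client evicting textures would eject from the tail. */
	}
}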
diff --git a/drivers/char/drm/i830_drm.h b/drivers/char/drm/i830_drm.h
deleted file mode 100644
index 4b00d2dd4f68..000000000000
--- a/drivers/char/drm/i830_drm.h
+++ /dev/null
@@ -1,342 +0,0 @@
1#ifndef _I830_DRM_H_
2#define _I830_DRM_H_
3
4/* WARNING: These defines must be the same as what the Xserver uses.
5 * If you change them, you must change the defines in the Xserver.
6 *
7 * KW: Actually, you can't ever change them because doing so would
8 * break backwards compatibility.
9 */
10
11#ifndef _I830_DEFINES_
12#define _I830_DEFINES_
13
14#define I830_DMA_BUF_ORDER 12
15#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
16#define I830_DMA_BUF_NR 256
17#define I830_NR_SAREA_CLIPRECTS 8
18
19/* Each region is a minimum of 64k, and there are at most 64 of them.
20 */
21#define I830_NR_TEX_REGIONS 64
22#define I830_LOG_MIN_TEX_REGION_SIZE 16
23
24/* KW: These aren't correct but someone set them to two and then
25 * released the module. Now we can't change them as doing so would
26 * break backwards compatibility.
27 */
28#define I830_TEXTURE_COUNT 2
29#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
30
31#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
32
33#define I830_UPLOAD_CTX 0x1
34#define I830_UPLOAD_BUFFERS 0x2
35#define I830_UPLOAD_CLIPRECTS 0x4
36#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
37#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
38#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
39#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
40#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
41#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
42#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
43#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
44#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
45#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
46#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
47#define I830_UPLOAD_TEX0 0x10000
48#define I830_UPLOAD_TEX1 0x20000
49#define I830_UPLOAD_TEX2 0x40000
50#define I830_UPLOAD_TEX3 0x80000
51#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
52#define I830_UPLOAD_TEX_MASK 0xf0000
53#define I830_UPLOAD_TEXBLEND0 0x100000
54#define I830_UPLOAD_TEXBLEND1 0x200000
55#define I830_UPLOAD_TEXBLEND2 0x400000
56#define I830_UPLOAD_TEXBLEND3 0x800000
57#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
58#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
59#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
60#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
61#define I830_UPLOAD_STIPPLE 0x8000000
62
63/* Indices into buf.Setup where various bits of state are mirrored per
64 * context and per buffer. These can be fired at the card as a unit,
65 * or in a piecewise fashion as required.
66 */
67
68/* Destbuffer state
69 * - backbuffer linear offset and pitch -- invariant in the current DRI
70 * - zbuffer linear offset and pitch -- also invariant
71 * - drawing origin in back and depth buffers.
72 *
73 * Keep the depth/back buffer state here to accommodate private buffers
74 * in the future.
75 */
76
77#define I830_DESTREG_CBUFADDR 0
78#define I830_DESTREG_DBUFADDR 1
79#define I830_DESTREG_DV0 2
80#define I830_DESTREG_DV1 3
81#define I830_DESTREG_SENABLE 4
82#define I830_DESTREG_SR0 5
83#define I830_DESTREG_SR1 6
84#define I830_DESTREG_SR2 7
85#define I830_DESTREG_DR0 8
86#define I830_DESTREG_DR1 9
87#define I830_DESTREG_DR2 10
88#define I830_DESTREG_DR3 11
89#define I830_DESTREG_DR4 12
90#define I830_DEST_SETUP_SIZE 13
91
92/* Context state
93 */
94#define I830_CTXREG_STATE1 0
95#define I830_CTXREG_STATE2 1
96#define I830_CTXREG_STATE3 2
97#define I830_CTXREG_STATE4 3
98#define I830_CTXREG_STATE5 4
99#define I830_CTXREG_IALPHAB 5
100#define I830_CTXREG_STENCILTST 6
101#define I830_CTXREG_ENABLES_1 7
102#define I830_CTXREG_ENABLES_2 8
103#define I830_CTXREG_AA 9
104#define I830_CTXREG_FOGCOLOR 10
105#define I830_CTXREG_BLENDCOLR0 11
106#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
107#define I830_CTXREG_VF 13
108#define I830_CTXREG_VF2 14
109#define I830_CTXREG_MCSB0 15
110#define I830_CTXREG_MCSB1 16
111#define I830_CTX_SETUP_SIZE 17
112
113/* 1.3: Stipple state
114 */
115#define I830_STPREG_ST0 0
116#define I830_STPREG_ST1 1
117#define I830_STP_SETUP_SIZE 2
118
119/* Texture state (per tex unit)
120 */
121
122#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
123#define I830_TEXREG_MI1 1
124#define I830_TEXREG_MI2 2
125#define I830_TEXREG_MI3 3
126#define I830_TEXREG_MI4 4
127#define I830_TEXREG_MI5 5
128#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
129#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
130#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
131#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
132#define I830_TEX_SETUP_SIZE 10
133
134#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
135#define I830_TEXREG_TM0S0 1
136#define I830_TEXREG_TM0S1 2
137#define I830_TEXREG_TM0S2 3
138#define I830_TEXREG_TM0S3 4
139#define I830_TEXREG_TM0S4 5
140#define I830_TEXREG_NOP0 6 /* noop */
141#define I830_TEXREG_NOP1 7 /* noop */
142#define I830_TEXREG_NOP2 8 /* noop */
143#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
144#define __I830_TEX_SETUP_SIZE 10
145
146#define I830_FRONT 0x1
147#define I830_BACK 0x2
148#define I830_DEPTH 0x4
149
150#endif /* _I830_DEFINES_ */
151
152typedef struct _drm_i830_init {
153 enum {
154 I830_INIT_DMA = 0x01,
155 I830_CLEANUP_DMA = 0x02
156 } func;
157 unsigned int mmio_offset;
158 unsigned int buffers_offset;
159 int sarea_priv_offset;
160 unsigned int ring_start;
161 unsigned int ring_end;
162 unsigned int ring_size;
163 unsigned int front_offset;
164 unsigned int back_offset;
165 unsigned int depth_offset;
166 unsigned int w;
167 unsigned int h;
168 unsigned int pitch;
169 unsigned int pitch_bits;
170 unsigned int back_pitch;
171 unsigned int depth_pitch;
172 unsigned int cpp;
173} drm_i830_init_t;
174
175/* Warning: If you change the SAREA structure you must change the Xserver
176 * structure as well */
177
178typedef struct _drm_i830_tex_region {
179 unsigned char next, prev; /* indices to form a circular LRU */
180 unsigned char in_use; /* owned by a client, or free? */
181 int age; /* tracked by clients to update local LRU's */
182} drm_i830_tex_region_t;
183
184typedef struct _drm_i830_sarea {
185 unsigned int ContextState[I830_CTX_SETUP_SIZE];
186 unsigned int BufferState[I830_DEST_SETUP_SIZE];
187 unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
188 unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
189 unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
190 unsigned int Palette[2][256];
191 unsigned int dirty;
192
193 unsigned int nbox;
194 struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
195
196 /* Maintain an LRU of contiguous regions of texture space. If
197 * you think you own a region of texture memory, and it has an
198 * age different to the one you set, then you are mistaken and
199 * it has been stolen by another client. If global texAge
200 * hasn't changed, there is no need to walk the list.
201 *
202 * These regions can be used as a proxy for the fine-grained
203 * texture information of other clients - by maintaining them
204 * in the same lru which is used to age their own textures,
205 * clients have an approximate lru for the whole of global
206 * texture space, and can make informed decisions as to which
207 * areas to kick out. There is no need to choose whether to
208 * kick out your own texture or someone else's - simply eject
209 * them all in LRU order.
210 */
211
212 drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
213 /* Last element is the sentinel */
214 int texAge; /* last time texture was uploaded */
215 int last_enqueue; /* last time a buffer was enqueued */
216 int last_dispatch; /* age of the most recently dispatched buffer */
217 int last_quiescent; /* */
218 int ctxOwner; /* last context to upload state */
219
220 int vertex_prim;
221
222 int pf_enabled; /* is pageflipping allowed? */
223 int pf_active;
224 int pf_current_page; /* which buffer is being displayed? */
225
226 int perf_boxes; /* performance boxes to be displayed */
227
228 /* Here's the state for texunits 2,3:
229 */
230 unsigned int TexState2[I830_TEX_SETUP_SIZE];
231 unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
232 unsigned int TexBlendStateWordsUsed2;
233
234 unsigned int TexState3[I830_TEX_SETUP_SIZE];
235 unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
236 unsigned int TexBlendStateWordsUsed3;
237
238 unsigned int StippleState[I830_STP_SETUP_SIZE];
239} drm_i830_sarea_t;
240
241/* Flags for perf_boxes
242 */
243#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
244#define I830_BOX_FLIP 0x2 /* populated by kernel */
245#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
246#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
247#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
248
249/* I830 specific ioctls
250 * The device specific ioctl range is 0x40 to 0x79.
251 */
252#define DRM_I830_INIT 0x00
253#define DRM_I830_VERTEX 0x01
254#define DRM_I830_CLEAR 0x02
255#define DRM_I830_FLUSH 0x03
256#define DRM_I830_GETAGE 0x04
257#define DRM_I830_GETBUF 0x05
258#define DRM_I830_SWAP 0x06
259#define DRM_I830_COPY 0x07
260#define DRM_I830_DOCOPY 0x08
261#define DRM_I830_FLIP 0x09
262#define DRM_I830_IRQ_EMIT 0x0a
263#define DRM_I830_IRQ_WAIT 0x0b
264#define DRM_I830_GETPARAM 0x0c
265#define DRM_I830_SETPARAM 0x0d
266
267#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
268#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
269#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
270#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
271#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
272#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
273#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
274#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
275#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
276#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
277#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
278#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
279#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
280#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
281
282typedef struct _drm_i830_clear {
283 int clear_color;
284 int clear_depth;
285 int flags;
286 unsigned int clear_colormask;
287 unsigned int clear_depthmask;
288} drm_i830_clear_t;
289
290/* These may be placeholders if we have more cliprects than
291 * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
292 * false, indicating that the buffer will be dispatched again with a
293 * new set of cliprects.
294 */
295typedef struct _drm_i830_vertex {
296 int idx; /* buffer index */
297 int used; /* nr bytes in use */
298 int discard; /* client is finished with the buffer? */
299} drm_i830_vertex_t;
300
301typedef struct _drm_i830_copy_t {
302 int idx; /* buffer index */
303 int used; /* nr bytes in use */
304 void __user *address; /* Address to copy from */
305} drm_i830_copy_t;
306
307typedef struct drm_i830_dma {
308 void __user *virtual;
309 int request_idx;
310 int request_size;
311 int granted;
312} drm_i830_dma_t;
313
314/* 1.3: Userspace can request & wait on irq's:
315 */
316typedef struct drm_i830_irq_emit {
317 int __user *irq_seq;
318} drm_i830_irq_emit_t;
319
320typedef struct drm_i830_irq_wait {
321 int irq_seq;
322} drm_i830_irq_wait_t;
323
324/* 1.3: New ioctl to query kernel params:
325 */
326#define I830_PARAM_IRQ_ACTIVE 1
327
328typedef struct drm_i830_getparam {
329 int param;
330 int __user *value;
331} drm_i830_getparam_t;
332
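/* Hedged usage sketch for the GETPARAM ioctl defined above: query
 * I830_PARAM_IRQ_ACTIVE through an already-open DRM fd. Assumes
 * <sys/ioctl.h> and that __user expands to nothing in userspace
 * builds; error handling is elided.
 */
static int i830_irq_active(int drm_fd)
{
	drm_i830_getparam_t gp;
	int value = 0;

	gp.param = I830_PARAM_IRQ_ACTIVE;
	gp.value = &value;
	if (ioctl(drm_fd, DRM_IOCTL_I830_GETPARAM, &gp) != 0)
		return -1;
	return value;
}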
333/* 1.3: New ioctl to set kernel params:
334 */
335#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
336
337typedef struct drm_i830_setparam {
338 int param;
339 int value;
340} drm_i830_setparam_t;
341
342#endif /* _I830_DRM_H_ */
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
deleted file mode 100644
index 05c66cf03a9e..000000000000
--- a/drivers/char/drm/i915_drm.h
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef _I915_DRM_H_
28#define _I915_DRM_H_
29
30/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints.
32 */
33
34#include "drm.h"
35
36/* Each region is a minimum of 16k, and there are at most 255 of them.
37 */
38#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
39 * of chars for next/prev indices */
40#define I915_LOG_MIN_TEX_REGION_SIZE 14
41
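/* Worked example of the sizing above: the minimum region size is
 * 1 << I915_LOG_MIN_TEX_REGION_SIZE = 1 << 14 = 16k, and with 255
 * regions plus one sentinel the texList table in the sarea stays
 * around 2k, since single chars suffice for the next/prev indices.
 */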
42typedef struct _drm_i915_init {
43 enum {
44 I915_INIT_DMA = 0x01,
45 I915_CLEANUP_DMA = 0x02,
46 I915_RESUME_DMA = 0x03
47 } func;
48 unsigned int mmio_offset;
49 int sarea_priv_offset;
50 unsigned int ring_start;
51 unsigned int ring_end;
52 unsigned int ring_size;
53 unsigned int front_offset;
54 unsigned int back_offset;
55 unsigned int depth_offset;
56 unsigned int w;
57 unsigned int h;
58 unsigned int pitch;
59 unsigned int pitch_bits;
60 unsigned int back_pitch;
61 unsigned int depth_pitch;
62 unsigned int cpp;
63 unsigned int chipset;
64} drm_i915_init_t;
65
66typedef struct _drm_i915_sarea {
67 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
68 int last_upload; /* last time texture was uploaded */
69 int last_enqueue; /* last time a buffer was enqueued */
70 int last_dispatch; /* age of the most recently dispatched buffer */
71 int ctxOwner; /* last context to upload state */
72 int texAge;
73 int pf_enabled; /* is pageflipping allowed? */
74 int pf_active;
75 int pf_current_page; /* which buffer is being displayed? */
76 int perf_boxes; /* performance boxes to be displayed */
77 int width, height; /* screen size in pixels */
78
79 drm_handle_t front_handle;
80 int front_offset;
81 int front_size;
82
83 drm_handle_t back_handle;
84 int back_offset;
85 int back_size;
86
87 drm_handle_t depth_handle;
88 int depth_offset;
89 int depth_size;
90
91 drm_handle_t tex_handle;
92 int tex_offset;
93 int tex_size;
94 int log_tex_granularity;
95 int pitch;
96 int rotation; /* 0, 90, 180 or 270 */
97 int rotated_offset;
98 int rotated_size;
99 int rotated_pitch;
100 int virtualX, virtualY;
101
102 unsigned int front_tiled;
103 unsigned int back_tiled;
104 unsigned int depth_tiled;
105 unsigned int rotated_tiled;
106 unsigned int rotated2_tiled;
107
108 int pipeA_x;
109 int pipeA_y;
110 int pipeA_w;
111 int pipeA_h;
112 int pipeB_x;
113 int pipeB_y;
114 int pipeB_w;
115 int pipeB_h;
116} drm_i915_sarea_t;
117
118/* Flags for perf_boxes
119 */
120#define I915_BOX_RING_EMPTY 0x1
121#define I915_BOX_FLIP 0x2
122#define I915_BOX_WAIT 0x4
123#define I915_BOX_TEXTURE_LOAD 0x8
124#define I915_BOX_LOST_CONTEXT 0x10
125
126/* I915 specific ioctls
127 * The device specific ioctl range is 0x40 to 0x79.
128 */
129#define DRM_I915_INIT 0x00
130#define DRM_I915_FLUSH 0x01
131#define DRM_I915_FLIP 0x02
132#define DRM_I915_BATCHBUFFER 0x03
133#define DRM_I915_IRQ_EMIT 0x04
134#define DRM_I915_IRQ_WAIT 0x05
135#define DRM_I915_GETPARAM 0x06
136#define DRM_I915_SETPARAM 0x07
137#define DRM_I915_ALLOC 0x08
138#define DRM_I915_FREE 0x09
139#define DRM_I915_INIT_HEAP 0x0a
140#define DRM_I915_CMDBUFFER 0x0b
141#define DRM_I915_DESTROY_HEAP 0x0c
142#define DRM_I915_SET_VBLANK_PIPE 0x0d
143#define DRM_I915_GET_VBLANK_PIPE 0x0e
144#define DRM_I915_VBLANK_SWAP 0x0f
145#define DRM_I915_HWS_ADDR 0x11
146
147#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
148#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
149#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
150#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
151#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
152#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
153#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
154#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
155#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
156#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
157#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
158#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
159#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
160#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
161#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
162#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
163
164/* Allow drivers to submit batchbuffers directly to hardware, relying
165 * on the security mechanisms provided by hardware.
166 */
167typedef struct _drm_i915_batchbuffer {
168 int start; /* agp offset */
169 int used; /* nr bytes in use */
170 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
171 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
172 int num_cliprects; /* multipass with multiple cliprects? */
173 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
174} drm_i915_batchbuffer_t;
175
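/* Illustrative sketch only: submitting a batchbuffer through the ioctl
 * above, with no cliprects. The agp_offset and batch_len values are
 * hypothetical; a real client obtains them from its buffer manager.
 * Assumes <sys/ioctl.h>.
 */
static int i915_submit_batch(int drm_fd, int agp_offset, int batch_len)
{
	drm_i915_batchbuffer_t batch;

	batch.start = agp_offset;	/* agp offset of the batch */
	batch.used = batch_len;		/* bytes actually written */
	batch.DR1 = 0;
	batch.DR4 = 0;
	batch.num_cliprects = 0;	/* no drawing-rectangle updates */
	batch.cliprects = NULL;
	return ioctl(drm_fd, DRM_IOCTL_I915_BATCHBUFFER, &batch);
}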
176/* As above, but pass a pointer to userspace buffer which can be
177 * validated by the kernel prior to sending to hardware.
178 */
179typedef struct _drm_i915_cmdbuffer {
180 char __user *buf; /* pointer to userspace command buffer */
181 int sz; /* nr bytes in buf */
182 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
183 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
184 int num_cliprects; /* multipass with multiple cliprects? */
185 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
186} drm_i915_cmdbuffer_t;
187
188/* Userspace can request & wait on irq's:
189 */
190typedef struct drm_i915_irq_emit {
191 int __user *irq_seq;
192} drm_i915_irq_emit_t;
193
194typedef struct drm_i915_irq_wait {
195 int irq_seq;
196} drm_i915_irq_wait_t;
197
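/* Sketch of the emit-then-wait pattern these two structs support: emit
 * returns a sequence number through irq_seq, which is later handed to
 * the wait ioctl. Assumes <sys/ioctl.h>; error handling elided.
 */
static int i915_emit_and_wait_irq(int drm_fd)
{
	drm_i915_irq_emit_t emit;
	drm_i915_irq_wait_t wait;
	int seq = 0;

	emit.irq_seq = &seq;
	if (ioctl(drm_fd, DRM_IOCTL_I915_IRQ_EMIT, &emit) != 0)
		return -1;
	wait.irq_seq = seq;
	return ioctl(drm_fd, DRM_IOCTL_I915_IRQ_WAIT, &wait);
}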
198/* Ioctl to query kernel params:
199 */
200#define I915_PARAM_IRQ_ACTIVE 1
201#define I915_PARAM_ALLOW_BATCHBUFFER 2
202#define I915_PARAM_LAST_DISPATCH 3
203
204typedef struct drm_i915_getparam {
205 int param;
206 int __user *value;
207} drm_i915_getparam_t;
208
209/* Ioctl to set kernel params:
210 */
211#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
212#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
213#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
214
215typedef struct drm_i915_setparam {
216 int param;
217 int value;
218} drm_i915_setparam_t;
219
220/* A memory manager for regions of shared memory:
221 */
222#define I915_MEM_REGION_AGP 1
223
224typedef struct drm_i915_mem_alloc {
225 int region;
226 int alignment;
227 int size;
228 int __user *region_offset; /* offset from start of fb or agp */
229} drm_i915_mem_alloc_t;
230
231typedef struct drm_i915_mem_free {
232 int region;
233 int region_offset;
234} drm_i915_mem_free_t;
235
236typedef struct drm_i915_mem_init_heap {
237 int region;
238 int size;
239 int start;
240} drm_i915_mem_init_heap_t;
241
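/* Sketch of the allocation side of the memory manager described above:
 * a client asks for a block in the AGP region and the kernel writes the
 * resulting offset back through region_offset. Parameters here are
 * hypothetical placeholders. Assumes <sys/ioctl.h>.
 */
static int i915_alloc_agp_block(int drm_fd, int size, int *offset_out)
{
	drm_i915_mem_alloc_t alloc;

	alloc.region = I915_MEM_REGION_AGP;
	alloc.alignment = 0;
	alloc.size = size;
	alloc.region_offset = offset_out;	/* kernel fills this in */
	return ioctl(drm_fd, DRM_IOCTL_I915_ALLOC, &alloc);
}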
242/* Allow memory manager to be torn down and re-initialized (eg on
243 * rotate):
244 */
245typedef struct drm_i915_mem_destroy_heap {
246 int region;
247} drm_i915_mem_destroy_heap_t;
248
249/* Allow X server to configure which pipes to monitor for vblank signals
250 */
251#define DRM_I915_VBLANK_PIPE_A 1
252#define DRM_I915_VBLANK_PIPE_B 2
253
254typedef struct drm_i915_vblank_pipe {
255 int pipe;
256} drm_i915_vblank_pipe_t;
257
258/* Schedule buffer swap at given vertical blank:
259 */
260typedef struct drm_i915_vblank_swap {
261 drm_drawable_t drawable;
262 enum drm_vblank_seq_type seqtype;
263 unsigned int sequence;
264} drm_i915_vblank_swap_t;
265
266typedef struct drm_i915_hws_addr {
267 uint64_t addr;
268} drm_i915_hws_addr_t;
269
270#endif /* _I915_DRM_H_ */
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h
deleted file mode 100644
index 944b50a5ff24..000000000000
--- a/drivers/char/drm/mga_drm.h
+++ /dev/null
@@ -1,417 +0,0 @@
1/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
2 * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 *
31 * Rewritten by:
32 * Gareth Hughes <gareth@valinux.com>
33 */
34
35#ifndef __MGA_DRM_H__
36#define __MGA_DRM_H__
37
38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the Xserver file (mga_sarea.h)
40 */
41
42#ifndef __MGA_SAREA_DEFINES__
43#define __MGA_SAREA_DEFINES__
44
45/* WARP pipe flags
46 */
47#define MGA_F 0x1 /* fog */
48#define MGA_A 0x2 /* alpha */
49#define MGA_S 0x4 /* specular */
50#define MGA_T2 0x8 /* multitexture */
51
52#define MGA_WARP_TGZ 0
53#define MGA_WARP_TGZF (MGA_F)
54#define MGA_WARP_TGZA (MGA_A)
55#define MGA_WARP_TGZAF (MGA_F|MGA_A)
56#define MGA_WARP_TGZS (MGA_S)
57#define MGA_WARP_TGZSF (MGA_S|MGA_F)
58#define MGA_WARP_TGZSA (MGA_S|MGA_A)
59#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
60#define MGA_WARP_T2GZ (MGA_T2)
61#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
62#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
63#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
64#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
65#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
66#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
67#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
68
69#define MGA_MAX_G200_PIPES 8 /* no multitex */
70#define MGA_MAX_G400_PIPES 16
71#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
72#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
73
74#define MGA_CARD_TYPE_G200 1
75#define MGA_CARD_TYPE_G400 2
76#define MGA_CARD_TYPE_G450 3 /* not currently used */
77#define MGA_CARD_TYPE_G550 4
78
79#define MGA_FRONT 0x1
80#define MGA_BACK 0x2
81#define MGA_DEPTH 0x4
82
83/* What needs to be changed for the current vertex dma buffer?
84 */
85#define MGA_UPLOAD_CONTEXT 0x1
86#define MGA_UPLOAD_TEX0 0x2
87#define MGA_UPLOAD_TEX1 0x4
88#define MGA_UPLOAD_PIPE 0x8
89#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
90#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
91#define MGA_UPLOAD_2D 0x40
92#define MGA_WAIT_AGE 0x80 /* handled client-side */
93#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
94#if 0
95#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
96 quiescent */
97#endif
98
99/* 128 buffers of 64k each, total 8 meg.
100 */
101#define MGA_BUFFER_SIZE (1 << 16)
102#define MGA_NUM_BUFFERS 128
103
104/* Keep these small for testing.
105 */
106#define MGA_NR_SAREA_CLIPRECTS 8
107
108/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
109 * regions, subject to a minimum region size of (1<<16) == 64k.
110 *
111 * Clients may subdivide regions internally, but when sharing between
112 * clients, the region size is the minimum granularity.
113 */
114
115#define MGA_CARD_HEAP 0
116#define MGA_AGP_HEAP 1
117#define MGA_NR_TEX_HEAPS 2
118#define MGA_NR_TEX_REGIONS 16
119#define MGA_LOG_MIN_TEX_REGION_SIZE 16
120
121#define DRM_MGA_IDLE_RETRY 2048
122
123#endif /* __MGA_SAREA_DEFINES__ */
124
125/* Setup registers for 3D context
126 */
127typedef struct {
128 unsigned int dstorg;
129 unsigned int maccess;
130 unsigned int plnwt;
131 unsigned int dwgctl;
132 unsigned int alphactrl;
133 unsigned int fogcolor;
134 unsigned int wflag;
135 unsigned int tdualstage0;
136 unsigned int tdualstage1;
137 unsigned int fcol;
138 unsigned int stencil;
139 unsigned int stencilctl;
140} drm_mga_context_regs_t;
141
142/* Setup registers for 2D, X server
143 */
144typedef struct {
145 unsigned int pitch;
146} drm_mga_server_regs_t;
147
148/* Setup registers for each texture unit
149 */
150typedef struct {
151 unsigned int texctl;
152 unsigned int texctl2;
153 unsigned int texfilter;
154 unsigned int texbordercol;
155 unsigned int texorg;
156 unsigned int texwidth;
157 unsigned int texheight;
158 unsigned int texorg1;
159 unsigned int texorg2;
160 unsigned int texorg3;
161 unsigned int texorg4;
162} drm_mga_texture_regs_t;
163
164/* General aging mechanism
165 */
166typedef struct {
167 unsigned int head; /* Position of head pointer */
168 unsigned int wrap; /* Primary DMA wrap count */
169} drm_mga_age_t;
170
171typedef struct _drm_mga_sarea {
172 /* The channel for communication of state information to the kernel
173 * on firing a vertex dma buffer.
174 */
175 drm_mga_context_regs_t context_state;
176 drm_mga_server_regs_t server_state;
177 drm_mga_texture_regs_t tex_state[2];
178 unsigned int warp_pipe;
179 unsigned int dirty;
180 unsigned int vertsize;
181
182 /* The current cliprects, or a subset thereof.
183 */
184 struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
185 unsigned int nbox;
186
187 /* Information about the most recently used 3d drawable. The
188 * client fills in the req_* fields, the server fills in the
189 * exported_ fields and puts the cliprects into boxes, above.
190 *
191 * The client clears the exported_drawable field before
192 * clobbering the boxes data.
193 */
194 unsigned int req_drawable; /* the X drawable id */
195 unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
196
197 unsigned int exported_drawable;
198 unsigned int exported_index;
199 unsigned int exported_stamp;
200 unsigned int exported_buffers;
201 unsigned int exported_nfront;
202 unsigned int exported_nback;
203 int exported_back_x, exported_front_x, exported_w;
204 int exported_back_y, exported_front_y, exported_h;
205 struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
206
207 /* Counters for aging textures and for client-side throttling.
208 */
209 unsigned int status[4];
210 unsigned int last_wrap;
211
212 drm_mga_age_t last_frame;
213 unsigned int last_enqueue; /* last time a buffer was enqueued */
214 unsigned int last_dispatch; /* age of the most recently dispatched buffer */
215 unsigned int last_quiescent; /* */
216
217 /* LRU lists for texture memory in agp space and on the card.
218 */
219 struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
220 unsigned int texAge[MGA_NR_TEX_HEAPS];
221
222 /* Mechanism to validate card state.
223 */
224 int ctxOwner;
225} drm_mga_sarea_t;
226
227/* MGA specific ioctls
228 * The device specific ioctl range is 0x40 to 0x79.
229 */
230#define DRM_MGA_INIT 0x00
231#define DRM_MGA_FLUSH 0x01
232#define DRM_MGA_RESET 0x02
233#define DRM_MGA_SWAP 0x03
234#define DRM_MGA_CLEAR 0x04
235#define DRM_MGA_VERTEX 0x05
236#define DRM_MGA_INDICES 0x06
237#define DRM_MGA_ILOAD 0x07
238#define DRM_MGA_BLIT 0x08
239#define DRM_MGA_GETPARAM 0x09
240
241/* 3.2:
242 * ioctls for operating on fences.
243 */
244#define DRM_MGA_SET_FENCE 0x0a
245#define DRM_MGA_WAIT_FENCE 0x0b
246#define DRM_MGA_DMA_BOOTSTRAP 0x0c
247
248#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
249#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
250#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
251#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
252#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
253#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
254#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
255#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
256#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
257#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
258#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
259#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
260#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
261
262typedef struct _drm_mga_warp_index {
263 int installed;
264 unsigned long phys_addr;
265 int size;
266} drm_mga_warp_index_t;
267
268typedef struct drm_mga_init {
269 enum {
270 MGA_INIT_DMA = 0x01,
271 MGA_CLEANUP_DMA = 0x02
272 } func;
273
274 unsigned long sarea_priv_offset;
275
276 int chipset;
277 int sgram;
278
279 unsigned int maccess;
280
281 unsigned int fb_cpp;
282 unsigned int front_offset, front_pitch;
283 unsigned int back_offset, back_pitch;
284
285 unsigned int depth_cpp;
286 unsigned int depth_offset, depth_pitch;
287
288 unsigned int texture_offset[MGA_NR_TEX_HEAPS];
289 unsigned int texture_size[MGA_NR_TEX_HEAPS];
290
291 unsigned long fb_offset;
292 unsigned long mmio_offset;
293 unsigned long status_offset;
294 unsigned long warp_offset;
295 unsigned long primary_offset;
296 unsigned long buffers_offset;
297} drm_mga_init_t;
298
299typedef struct drm_mga_dma_bootstrap {
300 /**
301 * \name AGP texture region
302 *
303 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
304 * be filled in with the actual AGP texture settings.
305 *
306 * \warning
307 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
308 * is zero, it means that PCI memory (most likely through the use of
309 * an IOMMU) is being used for "AGP" textures.
310 */
311 /*@{ */
312 unsigned long texture_handle; /**< Handle used to map AGP textures. */
313 uint32_t texture_size; /**< Size of the AGP texture region. */
314 /*@} */
315
316 /**
317 * Requested size of the primary DMA region.
318 *
319 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
320 * filled in with the actual size of the primary DMA region allocated.
321 */
322 uint32_t primary_size;
323
324 /**
325 * Requested number of secondary DMA buffers.
326 *
327 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
328 * filled in with the actual number of secondary DMA buffers
329 * allocated. Particularly when PCI DMA is used, this may be
330 * (substantially) less than the number requested.
331 */
332 uint32_t secondary_bin_count;
333
334 /**
335 * Requested size of each secondary DMA buffer.
336 *
337 * While the kernel \b is free to reduce
338 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
339 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
340 */
341 uint32_t secondary_bin_size;
342
343 /**
344 * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
345 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
346 * zero, it means that PCI DMA should be used, even if AGP is
347 * possible.
348 *
349 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
350 * filled in with the actual AGP mode. If AGP was not available
351 * (i.e., PCI DMA was used), this value will be zero.
352 */
353 uint32_t agp_mode;
354
355 /**
356 * Desired AGP GART size, measured in megabytes.
357 */
358 uint8_t agp_size;
359} drm_mga_dma_bootstrap_t;
360
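/* Request/return sketch for the bootstrap ioctl documented above: fill
 * in desired values, then read back what the kernel actually granted.
 * All request values here are hypothetical. Assumes <sys/ioctl.h>.
 */
static int mga_bootstrap_dma(int drm_fd, drm_mga_dma_bootstrap_t *dma_bs)
{
	dma_bs->primary_size = 1024 * 1024;	/* request 1 MB primary DMA */
	dma_bs->secondary_bin_count = 16;
	dma_bs->secondary_bin_size = 64 * 1024;
	dma_bs->agp_mode = 0;			/* 0 = force PCI DMA */
	dma_bs->agp_size = 32;			/* in megabytes */

	if (ioctl(drm_fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, dma_bs) != 0)
		return -1;
	/* On return, agp_mode == 0 means PCI DMA is in use, and
	 * secondary_bin_count may be less than requested. */
	return 0;
}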
361typedef struct drm_mga_clear {
362 unsigned int flags;
363 unsigned int clear_color;
364 unsigned int clear_depth;
365 unsigned int color_mask;
366 unsigned int depth_mask;
367} drm_mga_clear_t;
368
369typedef struct drm_mga_vertex {
370 int idx; /* buffer to queue */
371 int used; /* bytes in use */
372 int discard; /* client finished with buffer? */
373} drm_mga_vertex_t;
374
375typedef struct drm_mga_indices {
376 int idx; /* buffer to queue */
377 unsigned int start;
378 unsigned int end;
379 int discard; /* client finished with buffer? */
380} drm_mga_indices_t;
381
382typedef struct drm_mga_iload {
383 int idx;
384 unsigned int dstorg;
385 unsigned int length;
386} drm_mga_iload_t;
387
388typedef struct _drm_mga_blit {
389 unsigned int planemask;
390 unsigned int srcorg;
391 unsigned int dstorg;
392 int src_pitch, dst_pitch;
393 int delta_sx, delta_sy;
394 int delta_dx, delta_dy;
395 int height, ydir; /* flip image vertically */
396 int source_pitch, dest_pitch;
397} drm_mga_blit_t;
398
399/* 3.1: An ioctl to get parameters that aren't available to the 3d
400 * client any other way.
401 */
402#define MGA_PARAM_IRQ_NR 1
403
404/* 3.2: Query the actual card type. The DDX only distinguishes between
405 * G200 chips and non-G200 chips, which it calls G400. It turns out that
406 * there are some very subtle differences between the G4x0 chips and the G550
407 * chips. Using this parameter query, a client-side driver can detect the
408 * difference between a G4x0 and a G550.
409 */
410#define MGA_PARAM_CARD_TYPE 2
411
412typedef struct drm_mga_getparam {
413 int param;
414 void __user *value;
415} drm_mga_getparam_t;
416
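/* Sketch of the G4x0/G550 detection described above, using the
 * MGA_PARAM_CARD_TYPE query. Assumes <sys/ioctl.h>; error handling
 * elided.
 */
static int mga_get_card_type(int drm_fd)
{
	drm_mga_getparam_t gp;
	int card_type = 0;

	gp.param = MGA_PARAM_CARD_TYPE;
	gp.value = &card_type;
	if (ioctl(drm_fd, DRM_IOCTL_MGA_GETPARAM, &gp) != 0)
		return -1;
	return card_type;	/* e.g. MGA_CARD_TYPE_G550 */
}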
417#endif
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
deleted file mode 100644
index 8d8878b55f55..000000000000
--- a/drivers/char/drm/r128_drm.h
+++ /dev/null
@@ -1,326 +0,0 @@
1/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
3 */
4/*
5 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
6 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
7 * All rights reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Gareth Hughes <gareth@valinux.com>
30 * Kevin E. Martin <martin@valinux.com>
31 */
32
33#ifndef __R128_DRM_H__
34#define __R128_DRM_H__
35
36/* WARNING: If you change any of these defines, make sure to change the
37 * defines in the X server file (r128_sarea.h)
38 */
39#ifndef __R128_SAREA_DEFINES__
40#define __R128_SAREA_DEFINES__
41
42/* What needs to be changed for the current vertex buffer?
43 */
44#define R128_UPLOAD_CONTEXT 0x001
45#define R128_UPLOAD_SETUP 0x002
46#define R128_UPLOAD_TEX0 0x004
47#define R128_UPLOAD_TEX1 0x008
48#define R128_UPLOAD_TEX0IMAGES 0x010
49#define R128_UPLOAD_TEX1IMAGES 0x020
50#define R128_UPLOAD_CORE 0x040
51#define R128_UPLOAD_MASKS 0x080
52#define R128_UPLOAD_WINDOW 0x100
53#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
54#define R128_REQUIRE_QUIESCENCE 0x400
55#define R128_UPLOAD_ALL 0x7ff
56
57#define R128_FRONT 0x1
58#define R128_BACK 0x2
59#define R128_DEPTH 0x4
60
61/* Primitive types
62 */
63#define R128_POINTS 0x1
64#define R128_LINES 0x2
65#define R128_LINE_STRIP 0x3
66#define R128_TRIANGLES 0x4
67#define R128_TRIANGLE_FAN 0x5
68#define R128_TRIANGLE_STRIP 0x6
69
70/* Vertex/indirect buffer size
71 */
72#define R128_BUFFER_SIZE 16384
73
74/* Byte offsets for indirect buffer data
75 */
76#define R128_INDEX_PRIM_OFFSET 20
77#define R128_HOSTDATA_BLIT_OFFSET 32
78
79/* Keep these small for testing.
80 */
81#define R128_NR_SAREA_CLIPRECTS 12
82
83/* There are 2 heaps (local/AGP). Each region within a heap is a
84 * minimum of 64k, and there are at most 64 of them per heap.
85 */
86#define R128_LOCAL_TEX_HEAP 0
87#define R128_AGP_TEX_HEAP 1
88#define R128_NR_TEX_HEAPS 2
89#define R128_NR_TEX_REGIONS 64
90#define R128_LOG_TEX_GRANULARITY 16
91
92#define R128_NR_CONTEXT_REGS 12
93
94#define R128_MAX_TEXTURE_LEVELS 11
95#define R128_MAX_TEXTURE_UNITS 2
96
97#endif /* __R128_SAREA_DEFINES__ */
98
99typedef struct {
100 /* Context state - can be written in one large chunk */
101 unsigned int dst_pitch_offset_c;
102 unsigned int dp_gui_master_cntl_c;
103 unsigned int sc_top_left_c;
104 unsigned int sc_bottom_right_c;
105 unsigned int z_offset_c;
106 unsigned int z_pitch_c;
107 unsigned int z_sten_cntl_c;
108 unsigned int tex_cntl_c;
109 unsigned int misc_3d_state_cntl_reg;
110 unsigned int texture_clr_cmp_clr_c;
111 unsigned int texture_clr_cmp_msk_c;
112 unsigned int fog_color_c;
113
114 /* Texture state */
115 unsigned int tex_size_pitch_c;
116 unsigned int constant_color_c;
117
118 /* Setup state */
119 unsigned int pm4_vc_fpu_setup;
120 unsigned int setup_cntl;
121
122 /* Mask state */
123 unsigned int dp_write_mask;
124 unsigned int sten_ref_mask_c;
125 unsigned int plane_3d_mask_c;
126
127 /* Window state */
128 unsigned int window_xy_offset;
129
130 /* Core state */
131 unsigned int scale_3d_cntl;
132} drm_r128_context_regs_t;
133
134/* Setup registers for each texture unit
135 */
136typedef struct {
137 unsigned int tex_cntl;
138 unsigned int tex_combine_cntl;
139 unsigned int tex_size_pitch;
140 unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
141 unsigned int tex_border_color;
142} drm_r128_texture_regs_t;
143
144typedef struct drm_r128_sarea {
145 /* The channel for communication of state information to the kernel
146 * on firing a vertex buffer.
147 */
148 drm_r128_context_regs_t context_state;
149 drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
150 unsigned int dirty;
151 unsigned int vertsize;
152 unsigned int vc_format;
153
154 /* The current cliprects, or a subset thereof.
155 */
156 struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
157 unsigned int nbox;
158
159 /* Counters for client-side throttling of rendering clients.
160 */
161 unsigned int last_frame;
162 unsigned int last_dispatch;
163
164 struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
165 unsigned int tex_age[R128_NR_TEX_HEAPS];
166 int ctx_owner;
167 int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
168 int pfCurrentPage; /* which buffer is being displayed? */
169} drm_r128_sarea_t;
170
171/* WARNING: If you change any of these defines, make sure to change the
172 * defines in the Xserver file (xf86drmR128.h)
173 */
174
175/* Rage 128 specific ioctls
176 * The device specific ioctl range is 0x40 to 0x79.
177 */
178#define DRM_R128_INIT 0x00
179#define DRM_R128_CCE_START 0x01
180#define DRM_R128_CCE_STOP 0x02
181#define DRM_R128_CCE_RESET 0x03
182#define DRM_R128_CCE_IDLE 0x04
183/* 0x05 not used */
184#define DRM_R128_RESET 0x06
185#define DRM_R128_SWAP 0x07
186#define DRM_R128_CLEAR 0x08
187#define DRM_R128_VERTEX 0x09
188#define DRM_R128_INDICES 0x0a
189#define DRM_R128_BLIT 0x0b
190#define DRM_R128_DEPTH 0x0c
191#define DRM_R128_STIPPLE 0x0d
192/* 0x0e not used */
193#define DRM_R128_INDIRECT 0x0f
194#define DRM_R128_FULLSCREEN 0x10
195#define DRM_R128_CLEAR2 0x11
196#define DRM_R128_GETPARAM 0x12
197#define DRM_R128_FLIP 0x13
198
199#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
200#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
201#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
202#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
203#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
204/* 0x05 not used */
205#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
206#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
207#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
208#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
209#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
210#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
211#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
212#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
213/* 0x0e not used */
214#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
215#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
216#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
217#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
218#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
219
220typedef struct drm_r128_init {
221 enum {
222 R128_INIT_CCE = 0x01,
223 R128_CLEANUP_CCE = 0x02
224 } func;
225 unsigned long sarea_priv_offset;
226 int is_pci;
227 int cce_mode;
228 int cce_secure;
229 int ring_size;
230 int usec_timeout;
231
232 unsigned int fb_bpp;
233 unsigned int front_offset, front_pitch;
234 unsigned int back_offset, back_pitch;
235 unsigned int depth_bpp;
236 unsigned int depth_offset, depth_pitch;
237 unsigned int span_offset;
238
239 unsigned long fb_offset;
240 unsigned long mmio_offset;
241 unsigned long ring_offset;
242 unsigned long ring_rptr_offset;
243 unsigned long buffers_offset;
244 unsigned long agp_textures_offset;
245} drm_r128_init_t;
246
247typedef struct drm_r128_cce_stop {
248 int flush;
249 int idle;
250} drm_r128_cce_stop_t;
251
252typedef struct drm_r128_clear {
253 unsigned int flags;
254 unsigned int clear_color;
255 unsigned int clear_depth;
256 unsigned int color_mask;
257 unsigned int depth_mask;
258} drm_r128_clear_t;
259
260typedef struct drm_r128_vertex {
261 int prim;
262 int idx; /* Index of vertex buffer */
263 int count; /* Number of vertices in buffer */
264 int discard; /* Client finished with buffer? */
265} drm_r128_vertex_t;
266
267typedef struct drm_r128_indices {
268 int prim;
269 int idx;
270 int start;
271 int end;
272 int discard; /* Client finished with buffer? */
273} drm_r128_indices_t;
274
275typedef struct drm_r128_blit {
276 int idx;
277 int pitch;
278 int offset;
279 int format;
280 unsigned short x, y;
281 unsigned short width, height;
282} drm_r128_blit_t;
283
284typedef struct drm_r128_depth {
285 enum {
286 R128_WRITE_SPAN = 0x01,
287 R128_WRITE_PIXELS = 0x02,
288 R128_READ_SPAN = 0x03,
289 R128_READ_PIXELS = 0x04
290 } func;
291 int n;
292 int __user *x;
293 int __user *y;
294 unsigned int __user *buffer;
295 unsigned char __user *mask;
296} drm_r128_depth_t;
297
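/* Sketch of a single-pixel depth read using the struct above. The
 * coordinate and output storage are caller-provided; this is a usage
 * illustration, not the DDX's actual code. Assumes <sys/ioctl.h>.
 */
static int r128_read_depth_pixel(int drm_fd, int x, int y,
				 unsigned int *depth_out)
{
	drm_r128_depth_t d;

	d.func = R128_READ_PIXELS;
	d.n = 1;
	d.x = &x;
	d.y = &y;
	d.buffer = depth_out;
	d.mask = NULL;
	return ioctl(drm_fd, DRM_IOCTL_R128_DEPTH, &d);
}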
298typedef struct drm_r128_stipple {
299 unsigned int __user *mask;
300} drm_r128_stipple_t;
301
302typedef struct drm_r128_indirect {
303 int idx;
304 int start;
305 int end;
306 int discard;
307} drm_r128_indirect_t;
308
309typedef struct drm_r128_fullscreen {
310 enum {
311 R128_INIT_FULLSCREEN = 0x01,
312 R128_CLEANUP_FULLSCREEN = 0x02
313 } func;
314} drm_r128_fullscreen_t;
315
316/* 2.3: An ioctl to get parameters that aren't available to the 3d
317 * client any other way.
318 */
319#define R128_PARAM_IRQ_NR 1
320
321typedef struct drm_r128_getparam {
322 int param;
323 void __user *value;
324} drm_r128_getparam_t;
325
326#endif
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
deleted file mode 100644
index 73ff51f12311..000000000000
--- a/drivers/char/drm/radeon_drm.h
+++ /dev/null
@@ -1,749 +0,0 @@
1/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
2 *
3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Kevin E. Martin <martin@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 * Keith Whitwell <keith@tungstengraphics.com>
31 */
32
33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__
35
36/* WARNING: If you change any of these defines, make sure to change the
37 * defines in the X server file (radeon_sarea.h)
38 */
39#ifndef __RADEON_SAREA_DEFINES__
40#define __RADEON_SAREA_DEFINES__
41
42/* Old style state flags, required for sarea interface (1.1 and 1.2
43 * clears) and 1.2 drm_vertex2 ioctl.
44 */
45#define RADEON_UPLOAD_CONTEXT 0x00000001
46#define RADEON_UPLOAD_VERTFMT 0x00000002
47#define RADEON_UPLOAD_LINE 0x00000004
48#define RADEON_UPLOAD_BUMPMAP 0x00000008
49#define RADEON_UPLOAD_MASKS 0x00000010
50#define RADEON_UPLOAD_VIEWPORT 0x00000020
51#define RADEON_UPLOAD_SETUP 0x00000040
52#define RADEON_UPLOAD_TCL 0x00000080
53#define RADEON_UPLOAD_MISC 0x00000100
54#define RADEON_UPLOAD_TEX0 0x00000200
55#define RADEON_UPLOAD_TEX1 0x00000400
56#define RADEON_UPLOAD_TEX2 0x00000800
57#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
58#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
59#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
60#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
61#define RADEON_REQUIRE_QUIESCENCE 0x00010000
62#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
63#define RADEON_UPLOAD_ALL 0x003effff
64#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
65
66/* New style per-packet identifiers for use in cmd_buffer ioctl with
67 * the RADEON_EMIT_PACKET command. Comments relate new packets to old
68 * state bits and the packet size:
69 */
70#define RADEON_EMIT_PP_MISC 0 /* context/7 */
71#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
72#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
73#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
74#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
75#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
76#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
77#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
78#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
79#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
80#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
81#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
82#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
83#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
84#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
85#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
86#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
87#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
88#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
89#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
90#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
91#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
92#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
93#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
94#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
95#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
96#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
97#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
98#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
99#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
100#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
101#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
102#define R200_EMIT_VAP_CTL 32 /* vap/1 */
103#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
104#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
105#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
106#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
107#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
108#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
109#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
110#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
111#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
112#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
113#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
114#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
115#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
116#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
117#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
118#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
119#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
120#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
121#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
122#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
123#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
124#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
125#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
126#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
127#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
128#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
129#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
130#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
131#define R200_EMIT_PP_CUBIC_FACES_0 61
132#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
133#define R200_EMIT_PP_CUBIC_FACES_1 63
134#define R200_EMIT_PP_CUBIC_OFFSETS_1 64
135#define R200_EMIT_PP_CUBIC_FACES_2 65
136#define R200_EMIT_PP_CUBIC_OFFSETS_2 66
137#define R200_EMIT_PP_CUBIC_FACES_3 67
138#define R200_EMIT_PP_CUBIC_OFFSETS_3 68
139#define R200_EMIT_PP_CUBIC_FACES_4 69
140#define R200_EMIT_PP_CUBIC_OFFSETS_4 70
141#define R200_EMIT_PP_CUBIC_FACES_5 71
142#define R200_EMIT_PP_CUBIC_OFFSETS_5 72
143#define RADEON_EMIT_PP_TEX_SIZE_0 73
144#define RADEON_EMIT_PP_TEX_SIZE_1 74
145#define RADEON_EMIT_PP_TEX_SIZE_2 75
146#define R200_EMIT_RB3D_BLENDCOLOR 76
147#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77
148#define RADEON_EMIT_PP_CUBIC_FACES_0 78
149#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79
150#define RADEON_EMIT_PP_CUBIC_FACES_1 80
151#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81
152#define RADEON_EMIT_PP_CUBIC_FACES_2 82
153#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83
154#define R200_EMIT_PP_TRI_PERF_CNTL 84
155#define R200_EMIT_PP_AFS_0 85
156#define R200_EMIT_PP_AFS_1 86
157#define R200_EMIT_ATF_TFACTOR 87
158#define R200_EMIT_PP_TXCTLALL_0 88
159#define R200_EMIT_PP_TXCTLALL_1 89
160#define R200_EMIT_PP_TXCTLALL_2 90
161#define R200_EMIT_PP_TXCTLALL_3 91
162#define R200_EMIT_PP_TXCTLALL_4 92
163#define R200_EMIT_PP_TXCTLALL_5 93
164#define R200_EMIT_VAP_PVS_CNTL 94
165#define RADEON_MAX_STATE_PACKETS 95
166
167/* Commands understood by cmd_buffer ioctl. More can be added but
168 * obviously these can't be removed or changed:
169 */
170#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
171#define RADEON_CMD_SCALARS 2 /* emit scalar data */
172#define RADEON_CMD_VECTORS 3 /* emit vector data */
173#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
174#define RADEON_CMD_PACKET3 5 /* emit hw packet */
175#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
176#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
177#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
178 * doesn't make the cpu wait, just
179 * the graphics hardware */
180#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */
181
182typedef union {
183 int i;
184 struct {
185 unsigned char cmd_type, pad0, pad1, pad2;
186 } header;
187 struct {
188 unsigned char cmd_type, packet_id, pad0, pad1;
189 } packet;
190 struct {
191 unsigned char cmd_type, offset, stride, count;
192 } scalars;
193 struct {
194 unsigned char cmd_type, offset, stride, count;
195 } vectors;
196 struct {
197 unsigned char cmd_type, addr_lo, addr_hi, count;
198 } veclinear;
199 struct {
200 unsigned char cmd_type, buf_idx, pad0, pad1;
201 } dma;
202 struct {
203 unsigned char cmd_type, flags, pad0, pad1;
204 } wait;
205} drm_radeon_cmd_header_t;
206
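/* Sketch of how a client encodes one command in a cmd_buffer stream
 * using the union above: a header dword followed by its payload dwords.
 * RADEON_EMIT_SE_CNTL is "setup/2" per the table above, i.e. two
 * payload dwords. The buf pointer is hypothetical caller storage.
 */
static int radeon_encode_se_cntl(int *buf, unsigned int se_cntl0,
				 unsigned int se_cntl1)
{
	drm_radeon_cmd_header_t h;

	h.i = 0;
	h.packet.cmd_type = RADEON_CMD_PACKET;
	h.packet.packet_id = RADEON_EMIT_SE_CNTL;
	buf[0] = h.i;
	buf[1] = (int)se_cntl0;
	buf[2] = (int)se_cntl1;
	return 3;	/* dwords written */
}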
207#define RADEON_WAIT_2D 0x1
208#define RADEON_WAIT_3D 0x2
209
210/* Allowed parameters for R300_CMD_PACKET3
211 */
212#define R300_CMD_PACKET3_CLEAR 0
213#define R300_CMD_PACKET3_RAW 1
214
215/* Commands understood by cmd_buffer ioctl for R300.
216 * The interface has not been stabilized, so some of these may be removed
217 * and eventually reordered before stabilization.
218 */
219#define R300_CMD_PACKET0 1
220#define R300_CMD_VPU 2 /* emit vertex program upload */
221#define R300_CMD_PACKET3 3 /* emit a packet3 */
222#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
223#define R300_CMD_CP_DELAY 5
224#define R300_CMD_DMA_DISCARD 6
225#define R300_CMD_WAIT 7
226# define R300_WAIT_2D 0x1
227# define R300_WAIT_3D 0x2
228/* these two defines are DOING IT WRONG - however
229 * we have userspace which relies on using these.
230 * The wait interface is kept for backwards compatibility;
231 * new code should use the NEW_WAIT defines below.
232 * THESE ARE NOT BIT FIELDS
233 */
234# define R300_WAIT_2D_CLEAN 0x3
235# define R300_WAIT_3D_CLEAN 0x4
236
237# define R300_NEW_WAIT_2D_3D 0x3
238# define R300_NEW_WAIT_2D_2D_CLEAN 0x4
239# define R300_NEW_WAIT_3D_3D_CLEAN 0x6
240# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
241
242#define R300_CMD_SCRATCH 8
243#define R300_CMD_R500FP 9
244
245typedef union {
246 unsigned int u;
247 struct {
248 unsigned char cmd_type, pad0, pad1, pad2;
249 } header;
250 struct {
251 unsigned char cmd_type, count, reglo, reghi;
252 } packet0;
253 struct {
254 unsigned char cmd_type, count, adrlo, adrhi;
255 } vpu;
256 struct {
257 unsigned char cmd_type, packet, pad0, pad1;
258 } packet3;
259 struct {
260 unsigned char cmd_type, packet;
261 unsigned short count; /* amount of packet2 to emit */
262 } delay;
263 struct {
264 unsigned char cmd_type, buf_idx, pad0, pad1;
265 } dma;
266 struct {
267 unsigned char cmd_type, flags, pad0, pad1;
268 } wait;
269 struct {
270 unsigned char cmd_type, reg, n_bufs, flags;
271 } scratch;
272 struct {
273 unsigned char cmd_type, count, adrlo, adrhi_flags;
274 } r500fp;
275} drm_r300_cmd_header_t;
276
277#define RADEON_FRONT 0x1
278#define RADEON_BACK 0x2
279#define RADEON_DEPTH 0x4
280#define RADEON_STENCIL 0x8
281#define RADEON_CLEAR_FASTZ 0x80000000
282#define RADEON_USE_HIERZ 0x40000000
283#define RADEON_USE_COMP_ZBUF 0x20000000
284
285#define R500FP_CONSTANT_TYPE (1 << 1)
286#define R500FP_CONSTANT_CLAMP (1 << 2)
287
288/* Primitive types
289 */
290#define RADEON_POINTS 0x1
291#define RADEON_LINES 0x2
292#define RADEON_LINE_STRIP 0x3
293#define RADEON_TRIANGLES 0x4
294#define RADEON_TRIANGLE_FAN 0x5
295#define RADEON_TRIANGLE_STRIP 0x6
296
297/* Vertex/indirect buffer size
298 */
299#define RADEON_BUFFER_SIZE 65536
300
301/* Byte offsets for indirect buffer data
302 */
303#define RADEON_INDEX_PRIM_OFFSET 20
304
305#define RADEON_SCRATCH_REG_OFFSET 32
306
307#define RADEON_NR_SAREA_CLIPRECTS 12
308
309/* There are 2 heaps (local/GART). Each region within a heap is a
310 * minimum of 64k, and there are at most 64 of them per heap.
311 */
312#define RADEON_LOCAL_TEX_HEAP 0
313#define RADEON_GART_TEX_HEAP 1
314#define RADEON_NR_TEX_HEAPS 2
315#define RADEON_NR_TEX_REGIONS 64
316#define RADEON_LOG_TEX_GRANULARITY 16
317
318#define RADEON_MAX_TEXTURE_LEVELS 12
319#define RADEON_MAX_TEXTURE_UNITS 3
320
321#define RADEON_MAX_SURFACES 8
322
323/* Blits have strict offset rules. All blit offsets must be aligned on
324 * a 1K-byte boundary.
325 */
326#define RADEON_OFFSET_SHIFT 10
327#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
328#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
329
330#endif /* __RADEON_SAREA_DEFINES__ */
331
332typedef struct {
333 unsigned int red;
334 unsigned int green;
335 unsigned int blue;
336 unsigned int alpha;
337} radeon_color_regs_t;
338
339typedef struct {
340 /* Context state */
341 unsigned int pp_misc; /* 0x1c14 */
342 unsigned int pp_fog_color;
343 unsigned int re_solid_color;
344 unsigned int rb3d_blendcntl;
345 unsigned int rb3d_depthoffset;
346 unsigned int rb3d_depthpitch;
347 unsigned int rb3d_zstencilcntl;
348
349 unsigned int pp_cntl; /* 0x1c38 */
350 unsigned int rb3d_cntl;
351 unsigned int rb3d_coloroffset;
352 unsigned int re_width_height;
353 unsigned int rb3d_colorpitch;
354 unsigned int se_cntl;
355
356 /* Vertex format state */
357 unsigned int se_coord_fmt; /* 0x1c50 */
358
359 /* Line state */
360 unsigned int re_line_pattern; /* 0x1cd0 */
361 unsigned int re_line_state;
362
363 unsigned int se_line_width; /* 0x1db8 */
364
365 /* Bumpmap state */
366 unsigned int pp_lum_matrix; /* 0x1d00 */
367
368 unsigned int pp_rot_matrix_0; /* 0x1d58 */
369 unsigned int pp_rot_matrix_1;
370
371 /* Mask state */
372 unsigned int rb3d_stencilrefmask; /* 0x1d7c */
373 unsigned int rb3d_ropcntl;
374 unsigned int rb3d_planemask;
375
376 /* Viewport state */
377 unsigned int se_vport_xscale; /* 0x1d98 */
378 unsigned int se_vport_xoffset;
379 unsigned int se_vport_yscale;
380 unsigned int se_vport_yoffset;
381 unsigned int se_vport_zscale;
382 unsigned int se_vport_zoffset;
383
384 /* Setup state */
385 unsigned int se_cntl_status; /* 0x2140 */
386
387 /* Misc state */
388 unsigned int re_top_left; /* 0x26c0 */
389 unsigned int re_misc;
390} drm_radeon_context_regs_t;
391
392typedef struct {
393 /* Zbias state */
394 unsigned int se_zbias_factor; /* 0x1dac */
395 unsigned int se_zbias_constant;
396} drm_radeon_context2_regs_t;
397
398/* Setup registers for each texture unit
399 */
400typedef struct {
401 unsigned int pp_txfilter;
402 unsigned int pp_txformat;
403 unsigned int pp_txoffset;
404 unsigned int pp_txcblend;
405 unsigned int pp_txablend;
406 unsigned int pp_tfactor;
407 unsigned int pp_border_color;
408} drm_radeon_texture_regs_t;
409
410typedef struct {
411 unsigned int start;
412 unsigned int finish;
413 unsigned int prim:8;
414 unsigned int stateidx:8;
415 unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
416 unsigned int vc_format; /* vertex format */
417} drm_radeon_prim_t;
418
419typedef struct {
420 drm_radeon_context_regs_t context;
421 drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
422 drm_radeon_context2_regs_t context2;
423 unsigned int dirty;
424} drm_radeon_state_t;
425
426typedef struct {
427 /* The channel for communication of state information to the
428 * kernel on firing a vertex buffer with either of the
429 * obsoleted vertex/index ioctls.
430 */
431 drm_radeon_context_regs_t context_state;
432 drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
433 unsigned int dirty;
434 unsigned int vertsize;
435 unsigned int vc_format;
436
437 /* The current cliprects, or a subset thereof.
438 */
439 struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
440 unsigned int nbox;
441
442 /* Counters for client-side throttling of rendering clients.
443 */
444 unsigned int last_frame;
445 unsigned int last_dispatch;
446 unsigned int last_clear;
447
448 struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
449 1];
450 unsigned int tex_age[RADEON_NR_TEX_HEAPS];
451 int ctx_owner;
452 int pfState; /* number of 3d windows (0, 1, 2 or more) */
453 int pfCurrentPage; /* which buffer is being displayed? */
454 int crtc2_base; /* CRTC2 frame offset */
455 int tiling_enabled; /* set by drm, read by 2d + 3d clients */
456} drm_radeon_sarea_t;
457
458/* WARNING: If you change any of these defines, make sure to change the
459 * defines in the Xserver file (xf86drmRadeon.h)
460 *
461 * KW: actually it's illegal to change any of this (backwards compatibility).
462 */
463
464/* Radeon specific ioctls
465 * The device specific ioctl range is 0x40 to 0x79.
466 */
467#define DRM_RADEON_CP_INIT 0x00
468#define DRM_RADEON_CP_START 0x01
469#define DRM_RADEON_CP_STOP 0x02
470#define DRM_RADEON_CP_RESET 0x03
471#define DRM_RADEON_CP_IDLE 0x04
472#define DRM_RADEON_RESET 0x05
473#define DRM_RADEON_FULLSCREEN 0x06
474#define DRM_RADEON_SWAP 0x07
475#define DRM_RADEON_CLEAR 0x08
476#define DRM_RADEON_VERTEX 0x09
477#define DRM_RADEON_INDICES 0x0A
478#define DRM_RADEON_NOT_USED /* 0x0B not used */
479#define DRM_RADEON_STIPPLE 0x0C
480#define DRM_RADEON_INDIRECT 0x0D
481#define DRM_RADEON_TEXTURE 0x0E
482#define DRM_RADEON_VERTEX2 0x0F
483#define DRM_RADEON_CMDBUF 0x10
484#define DRM_RADEON_GETPARAM 0x11
485#define DRM_RADEON_FLIP 0x12
486#define DRM_RADEON_ALLOC 0x13
487#define DRM_RADEON_FREE 0x14
488#define DRM_RADEON_INIT_HEAP 0x15
489#define DRM_RADEON_IRQ_EMIT 0x16
490#define DRM_RADEON_IRQ_WAIT 0x17
491#define DRM_RADEON_CP_RESUME 0x18
492#define DRM_RADEON_SETPARAM 0x19
493#define DRM_RADEON_SURF_ALLOC 0x1a
494#define DRM_RADEON_SURF_FREE 0x1b
495
496#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
497#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
498#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
499#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
500#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
501#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
502#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
503#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
504#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
505#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
506#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
507#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
508#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
509#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
510#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
511#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
512#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
513#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
514#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
515#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
516#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
517#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
518#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
519#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
520#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
521#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
522#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
523
524typedef struct drm_radeon_init {
525 enum {
526 RADEON_INIT_CP = 0x01,
527 RADEON_CLEANUP_CP = 0x02,
528 RADEON_INIT_R200_CP = 0x03,
529 RADEON_INIT_R300_CP = 0x04
530 } func;
531 unsigned long sarea_priv_offset;
532 int is_pci;
533 int cp_mode;
534 int gart_size;
535 int ring_size;
536 int usec_timeout;
537
538 unsigned int fb_bpp;
539 unsigned int front_offset, front_pitch;
540 unsigned int back_offset, back_pitch;
541 unsigned int depth_bpp;
542 unsigned int depth_offset, depth_pitch;
543
544 unsigned long fb_offset;
545 unsigned long mmio_offset;
546 unsigned long ring_offset;
547 unsigned long ring_rptr_offset;
548 unsigned long buffers_offset;
549 unsigned long gart_textures_offset;
550} drm_radeon_init_t;
551
552typedef struct drm_radeon_cp_stop {
553 int flush;
554 int idle;
555} drm_radeon_cp_stop_t;
556
557typedef struct drm_radeon_fullscreen {
558 enum {
559 RADEON_INIT_FULLSCREEN = 0x01,
560 RADEON_CLEANUP_FULLSCREEN = 0x02
561 } func;
562} drm_radeon_fullscreen_t;
563
564#define CLEAR_X1 0
565#define CLEAR_Y1 1
566#define CLEAR_X2 2
567#define CLEAR_Y2 3
568#define CLEAR_DEPTH 4
569
570typedef union drm_radeon_clear_rect {
571 float f[5];
572 unsigned int ui[5];
573} drm_radeon_clear_rect_t;
574
575typedef struct drm_radeon_clear {
576 unsigned int flags;
577 unsigned int clear_color;
578 unsigned int clear_depth;
579 unsigned int color_mask;
580 unsigned int depth_mask; /* misnamed field: should be stencil */
581 drm_radeon_clear_rect_t __user *depth_boxes;
582} drm_radeon_clear_t;
583
584typedef struct drm_radeon_vertex {
585 int prim;
586 int idx; /* Index of vertex buffer */
587 int count; /* Number of vertices in buffer */
588 int discard; /* Client finished with buffer? */
589} drm_radeon_vertex_t;
590
591typedef struct drm_radeon_indices {
592 int prim;
593 int idx;
594 int start;
595 int end;
596 int discard; /* Client finished with buffer? */
597} drm_radeon_indices_t;
598
599/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
600 * - allows multiple primitives and state changes in a single ioctl
601 * - supports driver change to emit native primitives
602 */
603typedef struct drm_radeon_vertex2 {
604 int idx; /* Index of vertex buffer */
605 int discard; /* Client finished with buffer? */
606 int nr_states;
607 drm_radeon_state_t __user *state;
608 int nr_prims;
609 drm_radeon_prim_t __user *prim;
610} drm_radeon_vertex2_t;
611
612/* v1.3 - obsoletes drm_radeon_vertex2
613 * - allows arbitrarily large cliprect list
614 * - allows updating of tcl packet, vector and scalar state
615 * - allows memory-efficient description of state updates
616 * - allows state to be emitted without a primitive
617 * (for clears, ctx switches)
618 * - allows more than one dma buffer to be referenced per ioctl
619 * - supports tcl driver
620 * - may be extended in future versions with new cmd types, packets
621 */
622typedef struct drm_radeon_cmd_buffer {
623 int bufsz;
624 char __user *buf;
625 int nbox;
626 struct drm_clip_rect __user *boxes;
627} drm_radeon_cmd_buffer_t;
628
629typedef struct drm_radeon_tex_image {
630 unsigned int x, y; /* Blit coordinates */
631 unsigned int width, height;
632 const void __user *data;
633} drm_radeon_tex_image_t;
634
635typedef struct drm_radeon_texture {
636 unsigned int offset;
637 int pitch;
638 int format;
639 int width; /* Texture image coordinates */
640 int height;
641 drm_radeon_tex_image_t __user *image;
642} drm_radeon_texture_t;
643
644typedef struct drm_radeon_stipple {
645 unsigned int __user *mask;
646} drm_radeon_stipple_t;
647
648typedef struct drm_radeon_indirect {
649 int idx;
650 int start;
651 int end;
652 int discard;
653} drm_radeon_indirect_t;
654
655/* enum for card type parameters */
656#define RADEON_CARD_PCI 0
657#define RADEON_CARD_AGP 1
658#define RADEON_CARD_PCIE 2
659
660/* 1.3: An ioctl to get parameters that aren't available to the 3d
661 * client any other way.
662 */
663#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
664#define RADEON_PARAM_LAST_FRAME 2
665#define RADEON_PARAM_LAST_DISPATCH 3
666#define RADEON_PARAM_LAST_CLEAR 4
667/* Added with DRM version 1.6. */
668#define RADEON_PARAM_IRQ_NR 5
669#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
670/* Added with DRM version 1.8. */
671#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
672#define RADEON_PARAM_STATUS_HANDLE 8
673#define RADEON_PARAM_SAREA_HANDLE 9
674#define RADEON_PARAM_GART_TEX_HANDLE 10
675#define RADEON_PARAM_SCRATCH_OFFSET 11
676#define RADEON_PARAM_CARD_TYPE 12
677#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
678#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
679#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
680
681typedef struct drm_radeon_getparam {
682 int param;
683 void __user *value;
684} drm_radeon_getparam_t;
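For illustration, a 3d client fills drm_radeon_getparam_t and issues the ioctl
against an open DRM file descriptor; a minimal sketch, where fd and the usual
userspace headers are assumed and not part of this header file:

	drm_radeon_getparam_t gp;
	int gart_base;

	gp.param = RADEON_PARAM_GART_BASE;
	gp.value = &gart_base;	/* kernel writes the result here */
	if (ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp) == 0)
		printf("GART base: 0x%x\n", gart_base);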
685
686/* 1.6: Set up a memory manager for regions of shared memory:
687 */
688#define RADEON_MEM_REGION_GART 1
689#define RADEON_MEM_REGION_FB 2
690
691typedef struct drm_radeon_mem_alloc {
692 int region;
693 int alignment;
694 int size;
695 int __user *region_offset; /* offset from start of fb or GART */
696} drm_radeon_mem_alloc_t;
697
698typedef struct drm_radeon_mem_free {
699 int region;
700 int region_offset;
701} drm_radeon_mem_free_t;
702
703typedef struct drm_radeon_mem_init_heap {
704 int region;
705 int size;
706 int start;
707} drm_radeon_mem_init_heap_t;
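A sketch of the intended alloc/free round trip (the alignment and size values
are hypothetical; on success the kernel stores the allocated offset through
region_offset):

	drm_radeon_mem_alloc_t req;
	int offset;

	req.region = RADEON_MEM_REGION_GART;
	req.alignment = 4096;		/* hypothetical */
	req.size = 64 * 1024;
	req.region_offset = &offset;
	if (ioctl(fd, DRM_IOCTL_RADEON_ALLOC, &req) == 0) {
		/* ... use the region at 'offset' ... */
		drm_radeon_mem_free_t rel = {
			.region = RADEON_MEM_REGION_GART,
			.region_offset = offset,
		};
		ioctl(fd, DRM_IOCTL_RADEON_FREE, &rel);
	}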
708
709/* 1.6: Userspace can request & wait on irq's:
710 */
711typedef struct drm_radeon_irq_emit {
712 int __user *irq_seq;
713} drm_radeon_irq_emit_t;
714
715typedef struct drm_radeon_irq_wait {
716 int irq_seq;
717} drm_radeon_irq_wait_t;
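The emit/wait pair acts as a simple fence: emit hands back a sequence number,
and wait blocks until the card has raised the corresponding interrupt. A
sketch, with fd assumed as above:

	drm_radeon_irq_emit_t emit;
	drm_radeon_irq_wait_t wait;
	int seq;

	emit.irq_seq = &seq;
	if (ioctl(fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit) == 0) {
		wait.irq_seq = seq;
		ioctl(fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait);
	}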
718
719/* 1.10: Clients tell the DRM where they think the framebuffer is located in
720 * the card's address space, via a new generic ioctl to set parameters
721 */
722
723typedef struct drm_radeon_setparam {
724 unsigned int param;
725 int64_t value;
726} drm_radeon_setparam_t;
727
728#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
729#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
730#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
731#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
732#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
733#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
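Setting a parameter mirrors GETPARAM, except that value always travels as a
64-bit quantity. A sketch, with fb_loc a hypothetical card-space address:

	drm_radeon_setparam_t sp;

	sp.param = RADEON_SETPARAM_FB_LOCATION;
	sp.value = fb_loc;
	ioctl(fd, DRM_IOCTL_RADEON_SETPARAM, &sp);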
734/* 1.14: Clients can allocate/free a surface
735 */
736typedef struct drm_radeon_surface_alloc {
737 unsigned int address;
738 unsigned int size;
739 unsigned int flags;
740} drm_radeon_surface_alloc_t;
741
742typedef struct drm_radeon_surface_free {
743 unsigned int address;
744} drm_radeon_surface_free_t;
745
746#define DRM_RADEON_VBLANK_CRTC1 1
747#define DRM_RADEON_VBLANK_CRTC2 2
748
749#endif
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h
deleted file mode 100644
index 8a576ef01821..000000000000
--- a/drivers/char/drm/savage_drm.h
+++ /dev/null
@@ -1,210 +0,0 @@
1/* savage_drm.h -- Public header for the savage driver
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__
28
29#ifndef __SAVAGE_SAREA_DEFINES__
30#define __SAVAGE_SAREA_DEFINES__
31
32/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
33 * regions, subject to a minimum region size of (1<<16) == 64k.
34 *
35 * Clients may subdivide regions internally, but when sharing between
36 * clients, the region size is the minimum granularity.
37 */
38
39#define SAVAGE_CARD_HEAP 0
40#define SAVAGE_AGP_HEAP 1
41#define SAVAGE_NR_TEX_HEAPS 2
42#define SAVAGE_NR_TEX_REGIONS 16
43#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
44
45#endif /* __SAVAGE_SAREA_DEFINES__ */
46
47typedef struct _drm_savage_sarea {
48 /* LRU lists for texture memory in agp space and on the card.
49 */
50 struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
51 1];
52 unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
53
54 /* Mechanism to validate card state.
55 */
56 int ctxOwner;
57} drm_savage_sarea_t, *drm_savage_sarea_ptr;
58
59/* Savage-specific ioctls
60 */
61#define DRM_SAVAGE_BCI_INIT 0x00
62#define DRM_SAVAGE_BCI_CMDBUF 0x01
63#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
64#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
65
66#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
67#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
68#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
69#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
70
71#define SAVAGE_DMA_PCI 1
72#define SAVAGE_DMA_AGP 3
73typedef struct drm_savage_init {
74 enum {
75 SAVAGE_INIT_BCI = 1,
76 SAVAGE_CLEANUP_BCI = 2
77 } func;
78 unsigned int sarea_priv_offset;
79
80 /* some parameters */
81 unsigned int cob_size;
82 unsigned int bci_threshold_lo, bci_threshold_hi;
83 unsigned int dma_type;
84
85 /* frame buffer layout */
86 unsigned int fb_bpp;
87 unsigned int front_offset, front_pitch;
88 unsigned int back_offset, back_pitch;
89 unsigned int depth_bpp;
90 unsigned int depth_offset, depth_pitch;
91
92 /* local textures */
93 unsigned int texture_offset;
94 unsigned int texture_size;
95
96 /* physical locations of non-permanent maps */
97 unsigned long status_offset;
98 unsigned long buffers_offset;
99 unsigned long agp_textures_offset;
100 unsigned long cmd_dma_offset;
101} drm_savage_init_t;
102
103typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
104typedef struct drm_savage_cmdbuf {
105 /* command buffer in client's address space */
106 drm_savage_cmd_header_t __user *cmd_addr;
107 unsigned int size; /* size of the command buffer in 64bit units */
108
109 unsigned int dma_idx; /* DMA buffer index to use */
110 int discard; /* discard DMA buffer when done */
111 /* vertex buffer in client's address space */
112 unsigned int __user *vb_addr;
113 unsigned int vb_size; /* size of client vertex buffer in bytes */
114 unsigned int vb_stride; /* stride of vertices in 32bit words */
115 /* boxes in client's address space */
116 struct drm_clip_rect __user *box_addr;
117 unsigned int nbox; /* number of clipping boxes */
118} drm_savage_cmdbuf_t;
119
120#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
121#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
122#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
123typedef struct drm_savage_event {
124 unsigned int count;
125 unsigned int flags;
126} drm_savage_event_emit_t, drm_savage_event_wait_t;
127
128/* Commands for the cmdbuf ioctl
129 */
130#define SAVAGE_CMD_STATE 0 /* a range of state registers */
131#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
132#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
133#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
134#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
135#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
136#define SAVAGE_CMD_SWAP 6 /* swap buffers */
137
138/* Primitive types
139*/
140#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
141#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
142#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
143#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
144 * shading on s3d */
145
146/* Skip flags (vertex format)
147 */
148#define SAVAGE_SKIP_Z 0x01
149#define SAVAGE_SKIP_W 0x02
150#define SAVAGE_SKIP_C0 0x04
151#define SAVAGE_SKIP_C1 0x08
152#define SAVAGE_SKIP_S0 0x10
153#define SAVAGE_SKIP_T0 0x20
154#define SAVAGE_SKIP_ST0 0x30
155#define SAVAGE_SKIP_S1 0x40
156#define SAVAGE_SKIP_T1 0x80
157#define SAVAGE_SKIP_ST1 0xc0
158#define SAVAGE_SKIP_ALL_S3D 0x3f
159#define SAVAGE_SKIP_ALL_S4 0xff
160
161/* Buffer names for clear command
162 */
163#define SAVAGE_FRONT 0x1
164#define SAVAGE_BACK 0x2
165#define SAVAGE_DEPTH 0x4
166
167/* 64-bit command header
168 */
169union drm_savage_cmd_header {
170 struct {
171 unsigned char cmd; /* command */
172 unsigned char pad0;
173 unsigned short pad1;
174 unsigned short pad2;
175 unsigned short pad3;
176 } cmd; /* generic */
177 struct {
178 unsigned char cmd;
179 unsigned char global; /* need idle engine? */
180 unsigned short count; /* number of consecutive registers */
181 unsigned short start; /* first register */
182 unsigned short pad3;
183 } state; /* SAVAGE_CMD_STATE */
184 struct {
185 unsigned char cmd;
186 unsigned char prim; /* primitive type */
187 unsigned short skip; /* vertex format (skip flags) */
188 unsigned short count; /* number of vertices */
189 unsigned short start; /* first vertex in DMA/vertex buffer */
190 } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
191 struct {
192 unsigned char cmd;
193 unsigned char prim;
194 unsigned short skip;
195 unsigned short count; /* number of indices that follow */
196 unsigned short pad3;
197 } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
198 struct {
199 unsigned char cmd;
200 unsigned char pad0;
201 unsigned short pad1;
202 unsigned int flags;
203 } clear0; /* SAVAGE_CMD_CLEAR */
204 struct {
205 unsigned int mask;
206 unsigned int value;
207 } clear1; /* SAVAGE_CMD_CLEAR data */
208};
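For illustration, a SAVAGE_CMD_STATE header updating two consecutive registers
could be filled in as below (the register number is hypothetical; in the
command stream the header is followed by 'count' register values):

	union drm_savage_cmd_header h;

	h.state.cmd = SAVAGE_CMD_STATE;
	h.state.global = 0;	/* engine need not go idle first */
	h.state.count = 2;	/* two consecutive registers */
	h.state.start = 0x22;	/* hypothetical first register */
	h.state.pad3 = 0;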
209
210#endif
diff --git a/drivers/char/drm/sis_drm.h b/drivers/char/drm/sis_drm.h
deleted file mode 100644
index 30f7b3827466..000000000000
--- a/drivers/char/drm/sis_drm.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
2/*
3 * Copyright 2005 Eric Anholt
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 */
26
27#ifndef __SIS_DRM_H__
28#define __SIS_DRM_H__
29
30/* SiS specific ioctls */
31#define NOT_USED_0_3
32#define DRM_SIS_FB_ALLOC 0x04
33#define DRM_SIS_FB_FREE 0x05
34#define NOT_USED_6_12
35#define DRM_SIS_AGP_INIT 0x13
36#define DRM_SIS_AGP_ALLOC 0x14
37#define DRM_SIS_AGP_FREE 0x15
38#define DRM_SIS_FB_INIT 0x16
39
40#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
41#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
42#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
43#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
44#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
45#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
46/*
47#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
48#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
49#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
50*/
51
52typedef struct {
53 int context;
54 unsigned int offset;
55 unsigned int size;
56 unsigned long free;
57} drm_sis_mem_t;
58
59typedef struct {
60 unsigned int offset, size;
61} drm_sis_agp_t;
62
63typedef struct {
64 unsigned int offset, size;
65} drm_sis_fb_t;
66
67#endif /* __SIS_DRM_H__ */
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
deleted file mode 100644
index a3b5c102b067..000000000000
--- a/drivers/char/drm/via_drm.h
+++ /dev/null
@@ -1,275 +0,0 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _VIA_DRM_H_
25#define _VIA_DRM_H_
26
27/* WARNING: These defines must be the same as what the Xserver uses.
28 * if you change them, you must change the defines in the Xserver.
29 */
30
31#ifndef _VIA_DEFINES_
32#define _VIA_DEFINES_
33
34#ifndef __KERNEL__
35#include "via_drmclient.h"
36#endif
37
38#define VIA_NR_SAREA_CLIPRECTS 8
39#define VIA_NR_XVMC_PORTS 10
40#define VIA_NR_XVMC_LOCKS 5
41#define VIA_MAX_CACHELINE_SIZE 64
42#define XVMCLOCKPTR(saPriv,lockNo) \
43 ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
44 (VIA_MAX_CACHELINE_SIZE - 1)) & \
45 ~(VIA_MAX_CACHELINE_SIZE - 1)) + \
46 VIA_MAX_CACHELINE_SIZE*(lockNo)))
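Open-coded, the macro is the usual round-up-then-index arithmetic; an
equivalent sketch:

	volatile struct drm_hw_lock *lock;
	unsigned long base = (unsigned long)saPriv->XvMCLockArea;

	/* round up to the next cache-line boundary ... */
	base = (base + VIA_MAX_CACHELINE_SIZE - 1) & ~(unsigned long)(VIA_MAX_CACHELINE_SIZE - 1);
	/* ... then lock n sits n whole cache lines above the aligned base */
	lock = (volatile struct drm_hw_lock *)(base + VIA_MAX_CACHELINE_SIZE * lockNo);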
47
48/* Each region is a minimum of 64k, and there are at most 64 of them.
49 */
50#define VIA_NR_TEX_REGIONS 64
51#define VIA_LOG_MIN_TEX_REGION_SIZE 16
52#endif
53
54#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
55#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
56#define VIA_UPLOAD_CTX 0x4
57#define VIA_UPLOAD_BUFFERS 0x8
58#define VIA_UPLOAD_TEX0 0x10
59#define VIA_UPLOAD_TEX1 0x20
60#define VIA_UPLOAD_CLIPRECTS 0x40
61#define VIA_UPLOAD_ALL 0xff
62
63/* VIA specific ioctls */
64#define DRM_VIA_ALLOCMEM 0x00
65#define DRM_VIA_FREEMEM 0x01
66#define DRM_VIA_AGP_INIT 0x02
67#define DRM_VIA_FB_INIT 0x03
68#define DRM_VIA_MAP_INIT 0x04
69#define DRM_VIA_DEC_FUTEX 0x05
70#define NOT_USED
71#define DRM_VIA_DMA_INIT 0x07
72#define DRM_VIA_CMDBUFFER 0x08
73#define DRM_VIA_FLUSH 0x09
74#define DRM_VIA_PCICMD 0x0a
75#define DRM_VIA_CMDBUF_SIZE 0x0b
76#define NOT_USED
77#define DRM_VIA_WAIT_IRQ 0x0d
78#define DRM_VIA_DMA_BLIT 0x0e
79#define DRM_VIA_BLIT_SYNC 0x0f
80
81#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
82#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
83#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
84#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
85#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
86#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
87#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
88#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
89#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
90#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
91#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
92 drm_via_cmdbuf_size_t)
93#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
94#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
95#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
96
97/* Indices into buf.Setup where various bits of state are mirrored per
98 * context and per buffer. These can be fired at the card as a unit,
99 * or in a piecewise fashion as required.
100 */
101
102#define VIA_TEX_SETUP_SIZE 8
103
104/* Flags for clear ioctl
105 */
106#define VIA_FRONT 0x1
107#define VIA_BACK 0x2
108#define VIA_DEPTH 0x4
109#define VIA_STENCIL 0x8
110#define VIA_MEM_VIDEO 0 /* matches drm constant */
111#define VIA_MEM_AGP 1 /* matches drm constant */
112#define VIA_MEM_SYSTEM 2
113#define VIA_MEM_MIXED 3
114#define VIA_MEM_UNKNOWN 4
115
116typedef struct {
117 uint32_t offset;
118 uint32_t size;
119} drm_via_agp_t;
120
121typedef struct {
122 uint32_t offset;
123 uint32_t size;
124} drm_via_fb_t;
125
126typedef struct {
127 uint32_t context;
128 uint32_t type;
129 uint32_t size;
130 unsigned long index;
131 unsigned long offset;
132} drm_via_mem_t;
133
134typedef struct _drm_via_init {
135 enum {
136 VIA_INIT_MAP = 0x01,
137 VIA_CLEANUP_MAP = 0x02
138 } func;
139
140 unsigned long sarea_priv_offset;
141 unsigned long fb_offset;
142 unsigned long mmio_offset;
143 unsigned long agpAddr;
144} drm_via_init_t;
145
146typedef struct _drm_via_futex {
147 enum {
148 VIA_FUTEX_WAIT = 0x00,
149 VIA_FUTEX_WAKE = 0x01
150 } func;
151 uint32_t ms;
152 uint32_t lock;
153 uint32_t val;
154} drm_via_futex_t;
155
156typedef struct _drm_via_dma_init {
157 enum {
158 VIA_INIT_DMA = 0x01,
159 VIA_CLEANUP_DMA = 0x02,
160 VIA_DMA_INITIALIZED = 0x03
161 } func;
162
163 unsigned long offset;
164 unsigned long size;
165 unsigned long reg_pause_addr;
166} drm_via_dma_init_t;
167
168typedef struct _drm_via_cmdbuffer {
169 char __user *buf;
170 unsigned long size;
171} drm_via_cmdbuffer_t;
172
173/* Warning: If you change the SAREA structure you must change the Xserver
174 * structure as well */
175
176typedef struct _drm_via_tex_region {
177 unsigned char next, prev; /* indices to form a circular LRU */
178 unsigned char inUse; /* owned by a client, or free? */
179 int age; /* tracked by clients to update local LRU's */
180} drm_via_tex_region_t;
181
182typedef struct _drm_via_sarea {
183 unsigned int dirty;
184 unsigned int nbox;
185 struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
186 drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
187 int texAge; /* last time texture was uploaded */
188 int ctxOwner; /* last context to upload state */
189 int vertexPrim;
190
191 /*
192 * Below is for XvMC.
193 * We want the lock integers alone on, and aligned to, a cache line.
194 * Therefore this somewhat strange construct.
195 */
196
197 char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
198
199 unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
200 unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
201 unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
202
203 /* Used by the 3d driver only at this point, for pageflipping:
204 */
205 unsigned int pfCurrentOffset;
206} drm_via_sarea_t;
207
208typedef struct _drm_via_cmdbuf_size {
209 enum {
210 VIA_CMDBUF_SPACE = 0x01,
211 VIA_CMDBUF_LAG = 0x02
212 } func;
213 int wait;
214 uint32_t size;
215} drm_via_cmdbuf_size_t;
216
217typedef enum {
218 VIA_IRQ_ABSOLUTE = 0x0,
219 VIA_IRQ_RELATIVE = 0x1,
220 VIA_IRQ_SIGNAL = 0x10000000,
221 VIA_IRQ_FORCE_SEQUENCE = 0x20000000
222} via_irq_seq_type_t;
223
224#define VIA_IRQ_FLAGS_MASK 0xF0000000
225
226enum drm_via_irqs {
227 drm_via_irq_hqv0 = 0,
228 drm_via_irq_hqv1,
229 drm_via_irq_dma0_dd,
230 drm_via_irq_dma0_td,
231 drm_via_irq_dma1_dd,
232 drm_via_irq_dma1_td,
233 drm_via_irq_num
234};
235
236struct drm_via_wait_irq_request {
237 unsigned irq;
238 via_irq_seq_type_t type;
239 uint32_t sequence;
240 uint32_t signal;
241};
242
243typedef union drm_via_irqwait {
244 struct drm_via_wait_irq_request request;
245 struct drm_wait_vblank_reply reply;
246} drm_via_irqwait_t;
247
248typedef struct drm_via_blitsync {
249 uint32_t sync_handle;
250 unsigned engine;
251} drm_via_blitsync_t;
252
253/* Below, "flags" is currently unused but will be used for possible future
254 * extensions like kernel space bounce buffers for bad alignments and
255 * blit engine busy-wait polling for better latency in the absence of
256 * interrupts.
257 */
258
259typedef struct drm_via_dmablit {
260 uint32_t num_lines;
261 uint32_t line_length;
262
263 uint32_t fb_addr;
264 uint32_t fb_stride;
265
266 unsigned char *mem_addr;
267 uint32_t mem_stride;
268
269 uint32_t flags;
270 int to_fb;
271
272 drm_via_blitsync_t sync;
273} drm_via_dmablit_t;
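A sketch of a system-memory-to-framebuffer blit through this descriptor; the
geometry, offsets, and the src/dst_offset/fb_pitch/mem_pitch variables are
hypothetical, and on success the kernel fills in 'sync' for BLIT_SYNC to wait
on:

	drm_via_dmablit_t blit;

	memset(&blit, 0, sizeof(blit));
	blit.num_lines = 480;
	blit.line_length = 640 * 4;	/* hypothetical 640x480 at 32bpp */
	blit.fb_addr = dst_offset;	/* offset into video memory */
	blit.fb_stride = fb_pitch;
	blit.mem_addr = src;		/* source buffer in user memory */
	blit.mem_stride = mem_pitch;
	blit.to_fb = 1;			/* memory -> framebuffer */
	if (ioctl(fd, DRM_IOCTL_VIA_DMA_BLIT, &blit) == 0)
		ioctl(fd, DRM_IOCTL_VIA_BLIT_SYNC, &blit.sync);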
274
275#endif /* _VIA_DRM_H_ */
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index dd68f8541c2d..db2ae4216279 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -39,9 +39,14 @@ static int xencons_irq;
39 39
40/* ------------------------------------------------------------------ */ 40/* ------------------------------------------------------------------ */
41 41
42static unsigned long console_pfn = ~0ul;
43
42static inline struct xencons_interface *xencons_interface(void) 44static inline struct xencons_interface *xencons_interface(void)
43{ 45{
44 return mfn_to_virt(xen_start_info->console.domU.mfn); 46 if (console_pfn == ~0ul)
47 return mfn_to_virt(xen_start_info->console.domU.mfn);
48 else
49 return __va(console_pfn << PAGE_SHIFT);
45} 50}
46 51
47static inline void notify_daemon(void) 52static inline void notify_daemon(void)
@@ -101,20 +106,32 @@ static int __init xen_init(void)
101{ 106{
102 struct hvc_struct *hp; 107 struct hvc_struct *hp;
103 108
104 if (!is_running_on_xen()) 109 if (!is_running_on_xen() ||
105 return 0; 110 is_initial_xendomain() ||
111 !xen_start_info->console.domU.evtchn)
112 return -ENODEV;
106 113
107 xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn); 114 xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
108 if (xencons_irq < 0) 115 if (xencons_irq < 0)
109 xencons_irq = 0 /* NO_IRQ */; 116 xencons_irq = 0; /* NO_IRQ */
117
110 hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256); 118 hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256);
111 if (IS_ERR(hp)) 119 if (IS_ERR(hp))
112 return PTR_ERR(hp); 120 return PTR_ERR(hp);
113 121
114 hvc = hp; 122 hvc = hp;
123
124 console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);
125
115 return 0; 126 return 0;
116} 127}
117 128
129void xen_console_resume(void)
130{
131 if (xencons_irq)
132 rebind_evtchn_irq(xen_start_info->console.domU.evtchn, xencons_irq);
133}
134
118static void __exit xen_fini(void) 135static void __exit xen_fini(void)
119{ 136{
120 if (hvc) 137 if (hvc)
@@ -134,12 +151,28 @@ module_init(xen_init);
134module_exit(xen_fini); 151module_exit(xen_fini);
135console_initcall(xen_cons_init); 152console_initcall(xen_cons_init);
136 153
154static void raw_console_write(const char *str, int len)
155{
156 while (len > 0) {
157 int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
158 if (rc <= 0)
159 break;
160
161 str += rc;
162 len -= rc;
163 }
164}
165
166#ifdef CONFIG_EARLY_PRINTK
137static void xenboot_write_console(struct console *console, const char *string, 167static void xenboot_write_console(struct console *console, const char *string,
138 unsigned len) 168 unsigned len)
139{ 169{
140 unsigned int linelen, off = 0; 170 unsigned int linelen, off = 0;
141 const char *pos; 171 const char *pos;
142 172
173 raw_console_write(string, len);
174
175 write_console(0, "(early) ", 8);
143 while (off < len && NULL != (pos = strchr(string+off, '\n'))) { 176 while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
144 linelen = pos-string+off; 177 linelen = pos-string+off;
145 if (off + linelen > len) 178 if (off + linelen > len)
@@ -155,5 +188,23 @@ static void xenboot_write_console(struct console *console, const char *string,
155struct console xenboot_console = { 188struct console xenboot_console = {
156 .name = "xenboot", 189 .name = "xenboot",
157 .write = xenboot_write_console, 190 .write = xenboot_write_console,
158 .flags = CON_PRINTBUFFER | CON_BOOT, 191 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
159}; 192};
193#endif /* CONFIG_EARLY_PRINTK */
194
195void xen_raw_console_write(const char *str)
196{
197 raw_console_write(str, strlen(str));
198}
199
200void xen_raw_printk(const char *fmt, ...)
201{
202 static char buf[512];
203 va_list ap;
204
205 va_start(ap, fmt);
206 vsnprintf(buf, sizeof(buf), fmt, ap);
207 va_end(ap);
208
209 xen_raw_console_write(buf);
210}
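xen_raw_printk() is meant for debugging paths that run before the hvc console
(or even printk) is usable; output goes straight to the hypervisor through
HYPERVISOR_console_io. A hypothetical call site:

	xen_raw_printk("early: console mfn %lx\n",
		       (unsigned long)xen_start_info->console.domU.mfn);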
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 4a933d413423..59ca35156d81 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -32,8 +32,9 @@
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/bitrev.h> 34#include <linux/bitrev.h>
35#include <asm/uaccess.h> 35#include <linux/smp_lock.h>
36#include <asm/io.h> 36#include <linux/uaccess.h>
37#include <linux/io.h>
37 38
38#include <pcmcia/cs_types.h> 39#include <pcmcia/cs_types.h>
39#include <pcmcia/cs.h> 40#include <pcmcia/cs.h>
@@ -1405,11 +1406,11 @@ static void stop_monitor(struct cm4000_dev *dev)
1405 DEBUGP(3, dev, "<- stop_monitor\n"); 1406 DEBUGP(3, dev, "<- stop_monitor\n");
1406} 1407}
1407 1408
1408static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 1409static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1409 unsigned long arg)
1410{ 1410{
1411 struct cm4000_dev *dev = filp->private_data; 1411 struct cm4000_dev *dev = filp->private_data;
1412 unsigned int iobase = dev->p_dev->io.BasePort1; 1412 unsigned int iobase = dev->p_dev->io.BasePort1;
1413 struct inode *inode = filp->f_path.dentry->d_inode;
1413 struct pcmcia_device *link; 1414 struct pcmcia_device *link;
1414 int size; 1415 int size;
1415 int rc; 1416 int rc;
@@ -1426,38 +1427,42 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1426 DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), 1427 DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode),
1427 iminor(inode), ioctl_names[_IOC_NR(cmd)]); 1428 iminor(inode), ioctl_names[_IOC_NR(cmd)]);
1428 1429
1430 lock_kernel();
1431 rc = -ENODEV;
1429 link = dev_table[iminor(inode)]; 1432 link = dev_table[iminor(inode)];
1430 if (!pcmcia_dev_present(link)) { 1433 if (!pcmcia_dev_present(link)) {
1431 DEBUGP(4, dev, "DEV_OK false\n"); 1434 DEBUGP(4, dev, "DEV_OK false\n");
1432 return -ENODEV; 1435 goto out;
1433 } 1436 }
1434 1437
1435 if (test_bit(IS_CMM_ABSENT, &dev->flags)) { 1438 if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
1436 DEBUGP(4, dev, "CMM_ABSENT flag set\n"); 1439 DEBUGP(4, dev, "CMM_ABSENT flag set\n");
1437 return -ENODEV; 1440 goto out;
1438 } 1441 }
1442 rc = -EINVAL;
1439 1443
1440 if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { 1444 if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) {
1441 DEBUGP(4, dev, "ioctype mismatch\n"); 1445 DEBUGP(4, dev, "ioctype mismatch\n");
1442 return -EINVAL; 1446 goto out;
1443 } 1447 }
1444 if (_IOC_NR(cmd) > CM_IOC_MAXNR) { 1448 if (_IOC_NR(cmd) > CM_IOC_MAXNR) {
1445 DEBUGP(4, dev, "iocnr mismatch\n"); 1449 DEBUGP(4, dev, "iocnr mismatch\n");
1446 return -EINVAL; 1450 goto out;
1447 } 1451 }
1448 size = _IOC_SIZE(cmd); 1452 size = _IOC_SIZE(cmd);
1449 rc = 0; 1453 rc = -EFAULT;
1450 DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n", 1454 DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n",
1451 _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd); 1455 _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd);
1452 1456
1453 if (_IOC_DIR(cmd) & _IOC_READ) { 1457 if (_IOC_DIR(cmd) & _IOC_READ) {
1454 if (!access_ok(VERIFY_WRITE, argp, size)) 1458 if (!access_ok(VERIFY_WRITE, argp, size))
1455 return -EFAULT; 1459 goto out;
1456 } 1460 }
1457 if (_IOC_DIR(cmd) & _IOC_WRITE) { 1461 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1458 if (!access_ok(VERIFY_READ, argp, size)) 1462 if (!access_ok(VERIFY_READ, argp, size))
1459 return -EFAULT; 1463 goto out;
1460 } 1464 }
1465 rc = 0;
1461 1466
1462 switch (cmd) { 1467 switch (cmd) {
1463 case CM_IOCGSTATUS: 1468 case CM_IOCGSTATUS:
@@ -1477,9 +1482,9 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1477 if (test_bit(IS_BAD_CARD, &dev->flags)) 1482 if (test_bit(IS_BAD_CARD, &dev->flags))
1478 status |= CM_BAD_CARD; 1483 status |= CM_BAD_CARD;
1479 if (copy_to_user(argp, &status, sizeof(int))) 1484 if (copy_to_user(argp, &status, sizeof(int)))
1480 return -EFAULT; 1485 rc = -EFAULT;
1481 } 1486 }
1482 return 0; 1487 break;
1483 case CM_IOCGATR: 1488 case CM_IOCGATR:
1484 DEBUGP(4, dev, "... in CM_IOCGATR\n"); 1489 DEBUGP(4, dev, "... in CM_IOCGATR\n");
1485 { 1490 {
@@ -1492,25 +1497,29 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1492 || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) 1497 || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
1493 != 0)))) { 1498 != 0)))) {
1494 if (filp->f_flags & O_NONBLOCK) 1499 if (filp->f_flags & O_NONBLOCK)
1495 return -EAGAIN; 1500 rc = -EAGAIN;
1496 return -ERESTARTSYS; 1501 else
1502 rc = -ERESTARTSYS;
1503 break;
1497 } 1504 }
1498 1505
1506 rc = -EFAULT;
1499 if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { 1507 if (test_bit(IS_ATR_VALID, &dev->flags) == 0) {
1500 tmp = -1; 1508 tmp = -1;
1501 if (copy_to_user(&(atreq->atr_len), &tmp, 1509 if (copy_to_user(&(atreq->atr_len), &tmp,
1502 sizeof(int))) 1510 sizeof(int)))
1503 return -EFAULT; 1511 break;
1504 } else { 1512 } else {
1505 if (copy_to_user(atreq->atr, dev->atr, 1513 if (copy_to_user(atreq->atr, dev->atr,
1506 dev->atr_len)) 1514 dev->atr_len))
1507 return -EFAULT; 1515 break;
1508 1516
1509 tmp = dev->atr_len; 1517 tmp = dev->atr_len;
1510 if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int))) 1518 if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int)))
1511 return -EFAULT; 1519 break;
1512 } 1520 }
1513 return 0; 1521 rc = 0;
1522 break;
1514 } 1523 }
1515 case CM_IOCARDOFF: 1524 case CM_IOCARDOFF:
1516 1525
@@ -1538,8 +1547,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1538 || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) 1547 || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
1539 == 0)))) { 1548 == 0)))) {
1540 if (filp->f_flags & O_NONBLOCK) 1549 if (filp->f_flags & O_NONBLOCK)
1541 return -EAGAIN; 1550 rc = -EAGAIN;
1542 return -ERESTARTSYS; 1551 else
1552 rc = -ERESTARTSYS;
1553 break;
1543 } 1554 }
1544 /* Set Flags0 = 0x42 */ 1555 /* Set Flags0 = 0x42 */
1545 DEBUGP(4, dev, "Set Flags0=0x42 \n"); 1556 DEBUGP(4, dev, "Set Flags0=0x42 \n");
@@ -1554,8 +1565,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1554 || (test_bit(IS_ATR_VALID, (void *)&dev->flags) != 1565 || (test_bit(IS_ATR_VALID, (void *)&dev->flags) !=
1555 0)))) { 1566 0)))) {
1556 if (filp->f_flags & O_NONBLOCK) 1567 if (filp->f_flags & O_NONBLOCK)
1557 return -EAGAIN; 1568 rc = -EAGAIN;
1558 return -ERESTARTSYS; 1569 else
1570 rc = -ERESTARTSYS;
1571 break;
1559 } 1572 }
1560 } 1573 }
1561 /* release lock */ 1574 /* release lock */
@@ -1568,8 +1581,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1568 struct ptsreq krnptsreq; 1581 struct ptsreq krnptsreq;
1569 1582
1570 if (copy_from_user(&krnptsreq, argp, 1583 if (copy_from_user(&krnptsreq, argp,
1571 sizeof(struct ptsreq))) 1584 sizeof(struct ptsreq))) {
1572 return -EFAULT; 1585 rc = -EFAULT;
1586 break;
1587 }
1573 1588
1574 rc = 0; 1589 rc = 0;
1575 DEBUGP(4, dev, "... in CM_IOCSPTS\n"); 1590 DEBUGP(4, dev, "... in CM_IOCSPTS\n");
@@ -1580,8 +1595,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1580 || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) 1595 || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
1581 != 0)))) { 1596 != 0)))) {
1582 if (filp->f_flags & O_NONBLOCK) 1597 if (filp->f_flags & O_NONBLOCK)
1583 return -EAGAIN; 1598 rc = -EAGAIN;
1584 return -ERESTARTSYS; 1599 else
1600 rc = -ERESTARTSYS;
1601 break;
1585 } 1602 }
1586 /* get IO lock */ 1603 /* get IO lock */
1587 if (wait_event_interruptible 1604 if (wait_event_interruptible
@@ -1590,8 +1607,10 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1590 || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) 1607 || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
1591 == 0)))) { 1608 == 0)))) {
1592 if (filp->f_flags & O_NONBLOCK) 1609 if (filp->f_flags & O_NONBLOCK)
1593 return -EAGAIN; 1610 rc = -EAGAIN;
1594 return -ERESTARTSYS; 1611 else
1612 rc = -ERESTARTSYS;
1613 break;
1595 } 1614 }
1596 1615
1597 if ((rc = set_protocol(dev, &krnptsreq)) != 0) { 1616 if ((rc = set_protocol(dev, &krnptsreq)) != 0) {
@@ -1604,7 +1623,7 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1604 wake_up_interruptible(&dev->ioq); 1623 wake_up_interruptible(&dev->ioq);
1605 1624
1606 } 1625 }
1607 return rc; 1626 break;
1608#ifdef PCMCIA_DEBUG 1627#ifdef PCMCIA_DEBUG
1609 case CM_IOSDBGLVL: /* set debug log level */ 1628 case CM_IOSDBGLVL: /* set debug log level */
1610 { 1629 {
@@ -1612,18 +1631,20 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1612 1631
1613 old_pc_debug = pc_debug; 1632 old_pc_debug = pc_debug;
1614 if (copy_from_user(&pc_debug, argp, sizeof(int))) 1633 if (copy_from_user(&pc_debug, argp, sizeof(int)))
1615 return -EFAULT; 1634 rc = -EFAULT;
1616 1635 else if (old_pc_debug != pc_debug)
1617 if (old_pc_debug != pc_debug)
1618 DEBUGP(0, dev, "Changed debug log level " 1636 DEBUGP(0, dev, "Changed debug log level "
1619 "to %i\n", pc_debug); 1637 "to %i\n", pc_debug);
1620 } 1638 }
1621 return rc; 1639 break;
1622#endif 1640#endif
1623 default: 1641 default:
1624 DEBUGP(4, dev, "... in default (unknown IOCTL code)\n"); 1642 DEBUGP(4, dev, "... in default (unknown IOCTL code)\n");
1625 return -EINVAL; 1643 rc = -ENOTTY;
1626 } 1644 }
1645out:
1646 unlock_kernel();
1647 return rc;
1627} 1648}
1628 1649
1629static int cmm_open(struct inode *inode, struct file *filp) 1650static int cmm_open(struct inode *inode, struct file *filp)
@@ -1631,16 +1652,22 @@ static int cmm_open(struct inode *inode, struct file *filp)
1631 struct cm4000_dev *dev; 1652 struct cm4000_dev *dev;
1632 struct pcmcia_device *link; 1653 struct pcmcia_device *link;
1633 int minor = iminor(inode); 1654 int minor = iminor(inode);
1655 int ret;
1634 1656
1635 if (minor >= CM4000_MAX_DEV) 1657 if (minor >= CM4000_MAX_DEV)
1636 return -ENODEV; 1658 return -ENODEV;
1637 1659
1660 lock_kernel();
1638 link = dev_table[minor]; 1661 link = dev_table[minor];
1639 if (link == NULL || !pcmcia_dev_present(link)) 1662 if (link == NULL || !pcmcia_dev_present(link)) {
1640 return -ENODEV; 1663 ret = -ENODEV;
1664 goto out;
1665 }
1641 1666
1642 if (link->open) 1667 if (link->open) {
1643 return -EBUSY; 1668 ret = -EBUSY;
1669 goto out;
1670 }
1644 1671
1645 dev = link->priv; 1672 dev = link->priv;
1646 filp->private_data = dev; 1673 filp->private_data = dev;
@@ -1660,8 +1687,10 @@ static int cmm_open(struct inode *inode, struct file *filp)
1660 * valid = block until valid (or card 1687
1661 * inserted) 1688 * inserted)
1662 */ 1689 */
1663 if (filp->f_flags & O_NONBLOCK) 1690 if (filp->f_flags & O_NONBLOCK) {
1664 return -EAGAIN; 1691 ret = -EAGAIN;
1692 goto out;
1693 }
1665 1694
1666 dev->mdelay = T_50MSEC; 1695 dev->mdelay = T_50MSEC;
1667 1696
@@ -1671,7 +1700,10 @@ static int cmm_open(struct inode *inode, struct file *filp)
1671 link->open = 1; /* only one open per device */ 1700 link->open = 1; /* only one open per device */
1672 1701
1673 DEBUGP(2, dev, "<- cmm_open\n"); 1702 DEBUGP(2, dev, "<- cmm_open\n");
1674 return nonseekable_open(inode, filp); 1703 ret = nonseekable_open(inode, filp);
1704out:
1705 unlock_kernel();
1706 return ret;
1675} 1707}
1676 1708
1677static int cmm_close(struct inode *inode, struct file *filp) 1709static int cmm_close(struct inode *inode, struct file *filp)
@@ -1897,7 +1929,7 @@ static const struct file_operations cm4000_fops = {
1897 .owner = THIS_MODULE, 1929 .owner = THIS_MODULE,
1898 .read = cmm_read, 1930 .read = cmm_read,
1899 .write = cmm_write, 1931 .write = cmm_write,
1900 .ioctl = cmm_ioctl, 1932 .unlocked_ioctl = cmm_ioctl,
1901 .open = cmm_open, 1933 .open = cmm_open,
1902 .release= cmm_close, 1934 .release= cmm_close,
1903}; 1935};
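The conversion above follows the standard BKL-pushdown shape: the VFS no
longer wraps ->ioctl in the big kernel lock, so the handler moves to
->unlocked_ioctl, takes lock_kernel() itself, and turns every early return
into a jump to a single unlock point. In outline, for a hypothetical driver
(FOO_IOC_RESET and foo_do_reset are placeholders):

	static long foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	{
		long rc = -ENOTTY;

		lock_kernel();
		switch (cmd) {
		case FOO_IOC_RESET:	/* hypothetical command */
			rc = foo_do_reset(filp->private_data);
			break;
		}
		unlock_kernel();
		return rc;
	}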
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 035084c07329..6181f8a9b0bd 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -26,6 +26,7 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/poll.h> 28#include <linux/poll.h>
29#include <linux/smp_lock.h>
29#include <linux/wait.h> 30#include <linux/wait.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/io.h> 32#include <asm/io.h>
@@ -448,23 +449,30 @@ static int cm4040_open(struct inode *inode, struct file *filp)
448 struct reader_dev *dev; 449 struct reader_dev *dev;
449 struct pcmcia_device *link; 450 struct pcmcia_device *link;
450 int minor = iminor(inode); 451 int minor = iminor(inode);
452 int ret;
451 453
452 if (minor >= CM_MAX_DEV) 454 if (minor >= CM_MAX_DEV)
453 return -ENODEV; 455 return -ENODEV;
454 456
457 lock_kernel();
455 link = dev_table[minor]; 458 link = dev_table[minor];
456 if (link == NULL || !pcmcia_dev_present(link)) 459 if (link == NULL || !pcmcia_dev_present(link)) {
457 return -ENODEV; 460 ret = -ENODEV;
461 goto out;
462 }
458 463
459 if (link->open) 464 if (link->open) {
460 return -EBUSY; 465 ret = -EBUSY;
466 goto out;
467 }
461 468
462 dev = link->priv; 469 dev = link->priv;
463 filp->private_data = dev; 470 filp->private_data = dev;
464 471
465 if (filp->f_flags & O_NONBLOCK) { 472 if (filp->f_flags & O_NONBLOCK) {
466 DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n"); 473 DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
467 return -EAGAIN; 474 ret = -EAGAIN;
475 goto out;
468 } 476 }
469 477
470 link->open = 1; 478 link->open = 1;
@@ -473,7 +481,10 @@ static int cm4040_open(struct inode *inode, struct file *filp)
473 mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD); 481 mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
474 482
475 DEBUGP(2, dev, "<- cm4040_open (successfully)\n"); 483 DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
476 return nonseekable_open(inode, filp); 484 ret = nonseekable_open(inode, filp);
485out:
486 unlock_kernel();
487 return ret;
477} 488}
478 489
479static int cm4040_close(struct inode *inode, struct file *filp) 490static int cm4040_close(struct inode *inode, struct file *filp)
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 00c7f8407e3e..cc7dcea2d283 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -28,7 +28,6 @@
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include <pcmcia/version.h>
32#include <pcmcia/cisreg.h> 31#include <pcmcia/cisreg.h>
33#include <pcmcia/device_id.h> 32#include <pcmcia/device_id.h>
34#include <pcmcia/ss.h> 33#include <pcmcia/ss.h>
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43b71b69daa5..e522144cba3a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -174,4 +174,30 @@ config CRYPTO_DEV_HIFN_795X_RNG
174 Select this option if you want to enable the random number generator 174 Select this option if you want to enable the random number generator
175 on the HIFN 795x crypto adapters. 175 on the HIFN 795x crypto adapters.
176 176
177config CRYPTO_DEV_TALITOS
178 tristate "Talitos Freescale Security Engine (SEC)"
179 select CRYPTO_ALGAPI
180 select CRYPTO_AUTHENC
181 select HW_RANDOM
182 depends on FSL_SOC
183 help
184 Say 'Y' here to use the Freescale Security Engine (SEC)
185 to offload cryptographic algorithm computation.
186
187 The Freescale SEC is present on PowerQUICC 'E' processors, such
188 as the MPC8349E and MPC8548E.
189
190 To compile this driver as a module, choose M here: the module
191 will be called talitos.
192
193config CRYPTO_DEV_IXP4XX
194 tristate "Driver for IXP4xx crypto hardware acceleration"
195 depends on ARCH_IXP4XX
196 select CRYPTO_DES
197 select CRYPTO_ALGAPI
198 select CRYPTO_AUTHENC
199 select CRYPTO_BLKCIPHER
200 help
201 Driver for the IXP4xx NPE crypto engine.
202
177endif # CRYPTO_HW 203endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c0327f0dadc5..73557b2968d3 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,3 +2,5 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o 2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
5obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
6obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 81f3f950cd7d..4d22b21bd3e3 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -29,7 +29,6 @@
29#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/scatterlist.h> 30#include <linux/scatterlist.h>
31#include <linux/highmem.h> 31#include <linux/highmem.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h> 32#include <linux/crypto.h>
34#include <linux/hw_random.h> 33#include <linux/hw_random.h>
35#include <linux/ktime.h> 34#include <linux/ktime.h>
@@ -369,7 +368,9 @@ static atomic_t hifn_dev_number;
369#define HIFN_D_DST_RSIZE 80*4 368#define HIFN_D_DST_RSIZE 80*4
370#define HIFN_D_RES_RSIZE 24*4 369#define HIFN_D_RES_RSIZE 24*4
371 370
372#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-5 371#define HIFN_D_DST_DALIGN 4
372
373#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-1
373 374
374#define AES_MIN_KEY_SIZE 16 375#define AES_MIN_KEY_SIZE 16
375#define AES_MAX_KEY_SIZE 32 376#define AES_MAX_KEY_SIZE 32
@@ -535,10 +536,10 @@ struct hifn_crypt_command
535 */ 536 */
536struct hifn_mac_command 537struct hifn_mac_command
537{ 538{
538 volatile u16 masks; 539 volatile __le16 masks;
539 volatile u16 header_skip; 540 volatile __le16 header_skip;
540 volatile u16 source_count; 541 volatile __le16 source_count;
541 volatile u16 reserved; 542 volatile __le16 reserved;
542}; 543};
543 544
544#define HIFN_MAC_CMD_ALG_MASK 0x0001 545#define HIFN_MAC_CMD_ALG_MASK 0x0001
@@ -564,10 +565,10 @@ struct hifn_mac_command
564 565
565struct hifn_comp_command 566struct hifn_comp_command
566{ 567{
567 volatile u16 masks; 568 volatile __le16 masks;
568 volatile u16 header_skip; 569 volatile __le16 header_skip;
569 volatile u16 source_count; 570 volatile __le16 source_count;
570 volatile u16 reserved; 571 volatile __le16 reserved;
571}; 572};
572 573
573#define HIFN_COMP_CMD_SRCLEN_M 0xc000 574#define HIFN_COMP_CMD_SRCLEN_M 0xc000
@@ -583,10 +584,10 @@ struct hifn_comp_command
583 584
584struct hifn_base_result 585struct hifn_base_result
585{ 586{
586 volatile u16 flags; 587 volatile __le16 flags;
587 volatile u16 session; 588 volatile __le16 session;
588 volatile u16 src_cnt; /* 15:0 of source count */ 589 volatile __le16 src_cnt; /* 15:0 of source count */
589 volatile u16 dst_cnt; /* 15:0 of dest count */ 590 volatile __le16 dst_cnt; /* 15:0 of dest count */
590}; 591};
591 592
592#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ 593#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
@@ -597,8 +598,8 @@ struct hifn_base_result
597 598
598struct hifn_comp_result 599struct hifn_comp_result
599{ 600{
600 volatile u16 flags; 601 volatile __le16 flags;
601 volatile u16 crc; 602 volatile __le16 crc;
602}; 603};
603 604
604#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */ 605#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
@@ -609,8 +610,8 @@ struct hifn_comp_result
609 610
610struct hifn_mac_result 611struct hifn_mac_result
611{ 612{
612 volatile u16 flags; 613 volatile __le16 flags;
613 volatile u16 reserved; 614 volatile __le16 reserved;
614 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ 615 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
615}; 616};
616 617
@@ -619,8 +620,8 @@ struct hifn_mac_result
619 620
620struct hifn_crypt_result 621struct hifn_crypt_result
621{ 622{
622 volatile u16 flags; 623 volatile __le16 flags;
623 volatile u16 reserved; 624 volatile __le16 reserved;
624}; 625};
625 626
626#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */ 627#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
@@ -686,12 +687,12 @@ static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
686 687
687static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) 688static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
688{ 689{
689 writel(val, dev->bar[0] + reg); 690 writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
690} 691}
691 692
692static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val) 693static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
693{ 694{
694 writel(val, dev->bar[1] + reg); 695 writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
695} 696}
696 697
697static void hifn_wait_puc(struct hifn_device *dev) 698static void hifn_wait_puc(struct hifn_device *dev)
@@ -894,7 +895,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
894 char *offtbl = NULL; 895 char *offtbl = NULL;
895 int i; 896 int i;
896 897
897 for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { 898 for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
898 if (pci2id[i].pci_vendor == dev->pdev->vendor && 899 if (pci2id[i].pci_vendor == dev->pdev->vendor &&
899 pci2id[i].pci_prod == dev->pdev->device) { 900 pci2id[i].pci_prod == dev->pdev->device) {
900 offtbl = pci2id[i].card_id; 901 offtbl = pci2id[i].card_id;
@@ -1037,14 +1038,14 @@ static void hifn_init_registers(struct hifn_device *dev)
1037 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); 1038 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1038 1039
1039 /* write all 4 ring address registers */ 1040 /* write all 4 ring address registers */
1040 hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr + 1041 hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
1041 offsetof(struct hifn_dma, cmdr[0]))); 1042 offsetof(struct hifn_dma, cmdr[0]));
1042 hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr + 1043 hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
1043 offsetof(struct hifn_dma, srcr[0]))); 1044 offsetof(struct hifn_dma, srcr[0]));
1044 hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr + 1045 hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
1045 offsetof(struct hifn_dma, dstr[0]))); 1046 offsetof(struct hifn_dma, dstr[0]));
1046 hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr + 1047 hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
1047 offsetof(struct hifn_dma, resr[0]))); 1048 offsetof(struct hifn_dma, resr[0]));
1048 1049
1049 mdelay(2); 1050 mdelay(2);
1050#if 0 1051#if 0
@@ -1166,109 +1167,15 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
 	return cmd_len;
 }
 
-static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
-		unsigned int offset, unsigned int size)
-{
-	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-	int idx;
-	dma_addr_t addr;
-
-	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
-
-	idx = dma->srci;
-
-	dma->srcr[idx].p = __cpu_to_le32(addr);
-	dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
-			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
-
-	if (++idx == HIFN_D_SRC_RSIZE) {
-		dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
-				HIFN_D_JUMP |
-				HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
-		idx = 0;
-	}
-
-	dma->srci = idx;
-	dma->srcu++;
-
-	if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
-		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
-		dev->flags |= HIFN_FLAG_SRC_BUSY;
-	}
-
-	return size;
-}
-
-static void hifn_setup_res_desc(struct hifn_device *dev)
-{
-	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-
-	dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
-			HIFN_D_VALID | HIFN_D_LAST);
-	/*
-	 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
-	 *		HIFN_D_LAST | HIFN_D_NOINVALID);
-	 */
-
-	if (++dma->resi == HIFN_D_RES_RSIZE) {
-		dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
-				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
-		dma->resi = 0;
-	}
-
-	dma->resu++;
-
-	if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
-		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
-		dev->flags |= HIFN_FLAG_RES_BUSY;
-	}
-}
-
-static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
-		unsigned offset, unsigned size)
-{
-	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-	int idx;
-	dma_addr_t addr;
-
-	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
-
-	idx = dma->dsti;
-	dma->dstr[idx].p = __cpu_to_le32(addr);
-	dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
-			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
-
-	if (++idx == HIFN_D_DST_RSIZE) {
-		dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
-				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
-				HIFN_D_LAST | HIFN_D_NOINVALID);
-		idx = 0;
-	}
-	dma->dsti = idx;
-	dma->dstu++;
-
-	if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
-		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
-		dev->flags |= HIFN_FLAG_DST_BUSY;
-	}
-}
-
-static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
-		struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
-		struct hifn_context *ctx)
+static int hifn_setup_cmd_desc(struct hifn_device *dev,
+		struct hifn_context *ctx, void *priv, unsigned int nbytes)
 {
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int cmd_len, sa_idx;
 	u8 *buf, *buf_pos;
 	u16 mask;
 
-	dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
-			dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
-
-	sa_idx = dma->resi;
-
-	hifn_setup_src_desc(dev, spage, soff, nbytes);
-
+	sa_idx = dma->cmdi;
 	buf_pos = buf = dma->command_bufs[dma->cmdi];
 
 	mask = 0;
@@ -1370,16 +1277,113 @@ static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned
 		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
 		dev->flags |= HIFN_FLAG_CMD_BUSY;
 	}
-
-	hifn_setup_dst_desc(dev, dpage, doff, nbytes);
-	hifn_setup_res_desc(dev);
-
 	return 0;
 
 err_out:
 	return -EINVAL;
 }
 
+static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
+		unsigned int offset, unsigned int size)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	int idx;
+	dma_addr_t addr;
+
+	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
+
+	idx = dma->srci;
+
+	dma->srcr[idx].p = __cpu_to_le32(addr);
+	dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
+			HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
+
+	if (++idx == HIFN_D_SRC_RSIZE) {
+		dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
+				HIFN_D_JUMP |
+				HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
+		idx = 0;
+	}
+
+	dma->srci = idx;
+	dma->srcu++;
+
+	if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
+		dev->flags |= HIFN_FLAG_SRC_BUSY;
+	}
+
+	return size;
+}
+
+static void hifn_setup_res_desc(struct hifn_device *dev)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+
+	dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
+			HIFN_D_VALID | HIFN_D_LAST);
+	/*
+	 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
+	 *		HIFN_D_LAST);
+	 */
+
+	if (++dma->resi == HIFN_D_RES_RSIZE) {
+		dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
+				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
+		dma->resi = 0;
+	}
+
+	dma->resu++;
+
+	if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
+		dev->flags |= HIFN_FLAG_RES_BUSY;
+	}
+}
+
+static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
+		unsigned offset, unsigned size)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	int idx;
+	dma_addr_t addr;
+
+	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
+
+	idx = dma->dsti;
+	dma->dstr[idx].p = __cpu_to_le32(addr);
+	dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
+			HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
+
+	if (++idx == HIFN_D_DST_RSIZE) {
+		dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
+				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
+				HIFN_D_LAST);
+		idx = 0;
+	}
+	dma->dsti = idx;
+	dma->dstu++;
+
+	if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
+		dev->flags |= HIFN_FLAG_DST_BUSY;
+	}
+}
+
+static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
+		struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
+		struct hifn_context *ctx)
+{
+	dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
+			dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
+
+	hifn_setup_src_desc(dev, spage, soff, nbytes);
+	hifn_setup_cmd_desc(dev, ctx, priv, nbytes);
+	hifn_setup_dst_desc(dev, dpage, doff, nbytes);
+	hifn_setup_res_desc(dev);
+	return 0;
+}
+
 static int ablkcipher_walk_init(struct ablkcipher_walk *w,
 		int num, gfp_t gfp_flags)
 {
@@ -1431,7 +1435,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
 		return -EINVAL;
 
 	while (size) {
-		copy = min(drest, src->length);
+		copy = min(drest, min(size, src->length));
 
 		saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
 		memcpy(daddr, saddr + src->offset, copy);
@@ -1458,10 +1462,6 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
 static int ablkcipher_walk(struct ablkcipher_request *req,
 		struct ablkcipher_walk *w)
 {
-	unsigned blocksize =
-		crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
-	unsigned alignmask =
-		crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
 	struct scatterlist *src, *dst, *t;
 	void *daddr;
 	unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1477,16 +1477,14 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
 		dst = &req->dst[idx];
 
 		dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
-				"blocksize: %u, nbytes: %u.\n",
+				"nbytes: %u.\n",
 				__func__, src->length, dst->length, src->offset,
-				dst->offset, offset, blocksize, nbytes);
+				dst->offset, offset, nbytes);
 
-		if (src->length & (blocksize - 1) ||
-				src->offset & (alignmask - 1) ||
-				dst->length & (blocksize - 1) ||
-				dst->offset & (alignmask - 1) ||
-				offset) {
-			unsigned slen = src->length - offset;
+		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
+				!IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
+				offset) {
+			unsigned slen = min(src->length - offset, nbytes);
 			unsigned dlen = PAGE_SIZE;
 
 			t = &w->cache[idx];
@@ -1498,8 +1496,8 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
 
 			idx += err;
 
-			copy = slen & ~(blocksize - 1);
-			diff = slen & (blocksize - 1);
+			copy = slen & ~(HIFN_D_DST_DALIGN - 1);
+			diff = slen & (HIFN_D_DST_DALIGN - 1);
 
 			if (dlen < nbytes) {
 				/*
@@ -1507,7 +1505,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
 				 * to put there additional blocksized chunk,
 				 * so we mark that page as containing only
 				 * blocksize aligned chunks:
-				 * t->length = (slen & ~(blocksize - 1));
+				 * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
 				 * and increase number of bytes to be processed
 				 * in next chunk:
 				 * nbytes += diff;
@@ -1544,7 +1542,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
 
 			kunmap_atomic(daddr, KM_SOFTIRQ0);
 		} else {
-			nbytes -= src->length;
+			nbytes -= min(src->length, nbytes);
 			idx++;
 		}
 
@@ -1563,14 +1561,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct hifn_device *dev = ctx->dev;
 	struct page *spage, *dpage;
-	unsigned long soff, doff, flags;
+	unsigned long soff, doff, dlen, flags;
 	unsigned int nbytes = req->nbytes, idx = 0, len;
 	int err = -EINVAL, sg_num;
 	struct scatterlist *src, *dst, *t;
-	unsigned blocksize =
-		crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
-	unsigned alignmask =
-		crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
 
 	if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
 		goto err_out_exit;
@@ -1578,17 +1572,14 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 	ctx->walk.flags = 0;
 
 	while (nbytes) {
-		src = &req->src[idx];
 		dst = &req->dst[idx];
+		dlen = min(dst->length, nbytes);
 
-		if (src->length & (blocksize - 1) ||
-				src->offset & (alignmask - 1) ||
-				dst->length & (blocksize - 1) ||
-				dst->offset & (alignmask - 1)) {
+		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
+				!IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
 			ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
-		}
 
-		nbytes -= src->length;
+		nbytes -= dlen;
 		idx++;
 	}
 
@@ -1602,7 +1593,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 	idx = 0;
 
 	sg_num = ablkcipher_walk(req, &ctx->walk);
-
+	if (sg_num < 0) {
+		err = sg_num;
+		goto err_out_exit;
+	}
 	atomic_set(&ctx->sg_num, sg_num);
 
 	spin_lock_irqsave(&dev->lock, flags);
@@ -1640,7 +1634,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 		if (err)
 			goto err_out;
 
-		nbytes -= len;
+		nbytes -= min(len, nbytes);
 	}
 
 	dev->active = HIFN_DEFAULT_ACTIVE_NUM;
@@ -1651,7 +1645,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 err_out:
 	spin_unlock_irqrestore(&dev->lock, flags);
 err_out_exit:
-	if (err && printk_ratelimit())
+	if (err)
 		dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
 				"type: %u, err: %d.\n",
 				dev->name, ctx->iv, ctx->ivsize,
@@ -1745,8 +1739,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
 		return -EINVAL;
 
 	while (size) {
-
-		copy = min(dst->length, srest);
+		copy = min(srest, min(dst->length, size));
 
 		daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
 		memcpy(daddr + dst->offset + offset, saddr, copy);
@@ -1803,7 +1796,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
 				sg_page(dst), dst->length, nbytes);
 
 			if (!t->length) {
-				nbytes -= dst->length;
+				nbytes -= min(dst->length, nbytes);
 				idx++;
 				continue;
 			}
@@ -2202,9 +2195,9 @@ static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
 		return err;
 
 	if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
-		err = hifn_process_queue(dev);
+		hifn_process_queue(dev);
 
-	return err;
+	return -EINPROGRESS;
 }
 
 /*
@@ -2364,7 +2357,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 	 * 3DES ECB, CBC, CFB and OFB modes.
 	 */
 	{
-		.name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
+		.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
 		.ablkcipher = {
 			.min_keysize = HIFN_3DES_KEY_LENGTH,
 			.max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2374,7 +2367,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
+		.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
 		.ablkcipher = {
 			.min_keysize = HIFN_3DES_KEY_LENGTH,
 			.max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2384,8 +2377,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
+		.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
 		.ablkcipher = {
+			.ivsize = HIFN_IV_LENGTH,
 			.min_keysize = HIFN_3DES_KEY_LENGTH,
 			.max_keysize = HIFN_3DES_KEY_LENGTH,
 			.setkey = hifn_setkey,
@@ -2394,7 +2388,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
+		.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
 		.ablkcipher = {
 			.min_keysize = HIFN_3DES_KEY_LENGTH,
 			.max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2408,7 +2402,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 	 * DES ECB, CBC, CFB and OFB modes.
 	 */
 	{
-		.name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8,
+		.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
 		.ablkcipher = {
 			.min_keysize = HIFN_DES_KEY_LENGTH,
 			.max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2418,7 +2412,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8,
+		.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
 		.ablkcipher = {
 			.min_keysize = HIFN_DES_KEY_LENGTH,
 			.max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2428,8 +2422,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8,
+		.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
 		.ablkcipher = {
+			.ivsize = HIFN_IV_LENGTH,
 			.min_keysize = HIFN_DES_KEY_LENGTH,
 			.max_keysize = HIFN_DES_KEY_LENGTH,
 			.setkey = hifn_setkey,
@@ -2438,7 +2433,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8,
+		.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
 		.ablkcipher = {
 			.min_keysize = HIFN_DES_KEY_LENGTH,
 			.max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2452,7 +2447,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 	 * AES ECB, CBC, CFB and OFB modes.
 	 */
 	{
-		.name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16,
+		.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
 		.ablkcipher = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
@@ -2462,8 +2457,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16,
+		.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
 		.ablkcipher = {
+			.ivsize = HIFN_AES_IV_LENGTH,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.setkey = hifn_setkey,
@@ -2472,7 +2468,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16,
+		.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
 		.ablkcipher = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
@@ -2482,7 +2478,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		},
 	},
 	{
-		.name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16,
+		.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
 		.ablkcipher = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
@@ -2514,15 +2510,14 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
 		return -ENOMEM;
 
 	snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
-	snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name);
+	snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
+			t->drv_name, dev->name);
 
 	alg->alg.cra_priority = 300;
 	alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 	alg->alg.cra_blocksize = t->bsize;
 	alg->alg.cra_ctxsize = sizeof(struct hifn_context);
-	alg->alg.cra_alignmask = 15;
-	if (t->bsize == 8)
-		alg->alg.cra_alignmask = 3;
+	alg->alg.cra_alignmask = 0;
 	alg->alg.cra_type = &crypto_ablkcipher_type;
 	alg->alg.cra_module = THIS_MODULE;
 	alg->alg.cra_u.ablkcipher = t->ablkcipher;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
new file mode 100644
index 000000000000..42a107fe9233
--- /dev/null
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -0,0 +1,1506 @@
1/*
2 * Intel IXP4xx NPE-C crypto driver
3 *
4 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/platform_device.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmapool.h>
15#include <linux/crypto.h>
16#include <linux/kernel.h>
17#include <linux/rtnetlink.h>
18#include <linux/interrupt.h>
19#include <linux/spinlock.h>
20
21#include <crypto/ctr.h>
22#include <crypto/des.h>
23#include <crypto/aes.h>
24#include <crypto/sha.h>
25#include <crypto/algapi.h>
26#include <crypto/aead.h>
27#include <crypto/authenc.h>
28#include <crypto/scatterwalk.h>
29
30#include <asm/arch/npe.h>
31#include <asm/arch/qmgr.h>
32
33#define MAX_KEYLEN 32
34
35/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
36#define NPE_CTX_LEN 80
37#define AES_BLOCK128 16
38
39#define NPE_OP_HASH_VERIFY 0x01
40#define NPE_OP_CCM_ENABLE 0x04
41#define NPE_OP_CRYPT_ENABLE 0x08
42#define NPE_OP_HASH_ENABLE 0x10
43#define NPE_OP_NOT_IN_PLACE 0x20
44#define NPE_OP_HMAC_DISABLE 0x40
45#define NPE_OP_CRYPT_ENCRYPT 0x80
46
47#define NPE_OP_CCM_GEN_MIC 0xcc
48#define NPE_OP_HASH_GEN_ICV 0x50
49#define NPE_OP_ENC_GEN_KEY 0xc9
50
51#define MOD_ECB 0x0000
52#define MOD_CTR 0x1000
53#define MOD_CBC_ENC 0x2000
54#define MOD_CBC_DEC 0x3000
55#define MOD_CCM_ENC 0x4000
56#define MOD_CCM_DEC 0x5000
57
58#define KEYLEN_128 4
59#define KEYLEN_192 6
60#define KEYLEN_256 8
61
62#define CIPH_DECR 0x0000
63#define CIPH_ENCR 0x0400
64
65#define MOD_DES 0x0000
66#define MOD_TDEA2 0x0100
67#define MOD_3DES 0x0200
68#define MOD_AES 0x0800
69#define MOD_AES128 (0x0800 | KEYLEN_128)
70#define MOD_AES192 (0x0900 | KEYLEN_192)
71#define MOD_AES256 (0x0a00 | KEYLEN_256)
72
73#define MAX_IVLEN 16
74#define NPE_ID 2 /* NPE C */
75#define NPE_QLEN 16
76/* Space for registering when the first
77 * NPE_QLEN crypt_ctl are busy */
78#define NPE_QLEN_TOTAL 64
79
80#define SEND_QID 29
81#define RECV_QID 30
82
83#define CTL_FLAG_UNUSED 0x0000
84#define CTL_FLAG_USED 0x1000
85#define CTL_FLAG_PERFORM_ABLK 0x0001
86#define CTL_FLAG_GEN_ICV 0x0002
87#define CTL_FLAG_GEN_REVAES 0x0004
88#define CTL_FLAG_PERFORM_AEAD 0x0008
89#define CTL_FLAG_MASK 0x000f
90
91#define HMAC_IPAD_VALUE 0x36
92#define HMAC_OPAD_VALUE 0x5C
93#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
94
95#define MD5_DIGEST_SIZE 16
96
97struct buffer_desc {
98 u32 phys_next;
99 u16 buf_len;
100 u16 pkt_len;
101 u32 phys_addr;
102 u32 __reserved[4];
103 struct buffer_desc *next;
104};
105
106struct crypt_ctl {
107 u8 mode; /* NPE_OP_* operation mode */
108 u8 init_len;
109 u16 reserved;
110 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
111 u32 icv_rev_aes; /* icv or rev aes */
112 u32 src_buf;
113 u32 dst_buf;
114 u16 auth_offs; /* Authentication start offset */
115 u16 auth_len; /* Authentication data length */
116 u16 crypt_offs; /* Cryption start offset */
117 u16 crypt_len; /* Cryption data length */
118 u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
119 u32 crypto_ctx; /* NPE Crypto Param structure address */
120
121 /* Used by Host: 4*4 bytes*/
122 unsigned ctl_flags;
123 union {
124 struct ablkcipher_request *ablk_req;
125 struct aead_request *aead_req;
126 struct crypto_tfm *tfm;
127 } data;
128 struct buffer_desc *regist_buf;
129 u8 *regist_ptr;
130};
131
132struct ablk_ctx {
133 struct buffer_desc *src;
134 struct buffer_desc *dst;
135 unsigned src_nents;
136 unsigned dst_nents;
137};
138
139struct aead_ctx {
140 struct buffer_desc *buffer;
141 unsigned short assoc_nents;
142 unsigned short src_nents;
143 struct scatterlist ivlist;
144 /* used when the hmac is not on one sg entry */
145 u8 *hmac_virt;
146 int encrypt;
147};
148
149struct ix_hash_algo {
150 u32 cfgword;
151 unsigned char *icv;
152};
153
154struct ix_sa_dir {
155 unsigned char *npe_ctx;
156 dma_addr_t npe_ctx_phys;
157 int npe_ctx_idx;
158 u8 npe_mode;
159};
160
161struct ixp_ctx {
162 struct ix_sa_dir encrypt;
163 struct ix_sa_dir decrypt;
164 int authkey_len;
165 u8 authkey[MAX_KEYLEN];
166 int enckey_len;
167 u8 enckey[MAX_KEYLEN];
168 u8 salt[MAX_IVLEN];
169 u8 nonce[CTR_RFC3686_NONCE_SIZE];
170 unsigned salted;
171 atomic_t configuring;
172 struct completion completion;
173};
174
175struct ixp_alg {
176 struct crypto_alg crypto;
177 const struct ix_hash_algo *hash;
178 u32 cfg_enc;
179 u32 cfg_dec;
180
181 int registered;
182};
183
184static const struct ix_hash_algo hash_alg_md5 = {
185 .cfgword = 0xAA010004,
186 .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
187 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
188};
189static const struct ix_hash_algo hash_alg_sha1 = {
190 .cfgword = 0x00000005,
191 .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
192 "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
193};
194
195static struct npe *npe_c;
196static struct dma_pool *buffer_pool = NULL;
197static struct dma_pool *ctx_pool = NULL;
198
199static struct crypt_ctl *crypt_virt = NULL;
200static dma_addr_t crypt_phys;
201
202static int support_aes = 1;
203
204static void dev_release(struct device *dev)
205{
206 return;
207}
208
209#define DRIVER_NAME "ixp4xx_crypto"
210static struct platform_device pseudo_dev = {
211 .name = DRIVER_NAME,
212 .id = 0,
213 .num_resources = 0,
214 .dev = {
215 .coherent_dma_mask = DMA_32BIT_MASK,
216 .release = dev_release,
217 }
218};
219
220static struct device *dev = &pseudo_dev.dev;
221
222static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
223{
224 return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
225}
226
227static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
228{
229 return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
230}
231
232static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
233{
234 return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
235}
236
237static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
238{
239 return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
240}
241
242static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
243{
244 return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
245}
246
247static int setup_crypt_desc(void)
248{
249 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
250 crypt_virt = dma_alloc_coherent(dev,
251 NPE_QLEN * sizeof(struct crypt_ctl),
252 &crypt_phys, GFP_KERNEL);
253 if (!crypt_virt)
254 return -ENOMEM;
255 memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
256 return 0;
257}
258
259static spinlock_t desc_lock;
260static struct crypt_ctl *get_crypt_desc(void)
261{
262 int i;
263 static int idx = 0;
264 unsigned long flags;
265
266 spin_lock_irqsave(&desc_lock, flags);
267
268 if (unlikely(!crypt_virt))
269 setup_crypt_desc();
270 if (unlikely(!crypt_virt)) {
271 spin_unlock_irqrestore(&desc_lock, flags);
272 return NULL;
273 }
274 i = idx;
275 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
276 if (++idx >= NPE_QLEN)
277 idx = 0;
278 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
279 spin_unlock_irqrestore(&desc_lock, flags);
280 return crypt_virt +i;
281 } else {
282 spin_unlock_irqrestore(&desc_lock, flags);
283 return NULL;
284 }
285}
286
287static spinlock_t emerg_lock;
288static struct crypt_ctl *get_crypt_desc_emerg(void)
289{
290 int i;
291 static int idx = NPE_QLEN;
292 struct crypt_ctl *desc;
293 unsigned long flags;
294
295 desc = get_crypt_desc();
296 if (desc)
297 return desc;
298 if (unlikely(!crypt_virt))
299 return NULL;
300
301 spin_lock_irqsave(&emerg_lock, flags);
302 i = idx;
303 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
304 if (++idx >= NPE_QLEN_TOTAL)
305 idx = NPE_QLEN;
306 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
307 spin_unlock_irqrestore(&emerg_lock, flags);
308 return crypt_virt +i;
309 } else {
310 spin_unlock_irqrestore(&emerg_lock, flags);
311 return NULL;
312 }
313}
314
315static void free_buf_chain(struct buffer_desc *buf, u32 phys)
316{
317 while (buf) {
318 struct buffer_desc *buf1;
319 u32 phys1;
320
321 buf1 = buf->next;
322 phys1 = buf->phys_next;
323 dma_pool_free(buffer_pool, buf, phys);
324 buf = buf1;
325 phys = phys1;
326 }
327}
328
329static struct tasklet_struct crypto_done_tasklet;
330
331static void finish_scattered_hmac(struct crypt_ctl *crypt)
332{
333 struct aead_request *req = crypt->data.aead_req;
334 struct aead_ctx *req_ctx = aead_request_ctx(req);
335 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
336 int authsize = crypto_aead_authsize(tfm);
337 int decryptlen = req->cryptlen - authsize;
338
339 if (req_ctx->encrypt) {
340 scatterwalk_map_and_copy(req_ctx->hmac_virt,
341 req->src, decryptlen, authsize, 1);
342 }
343 dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
344}
345
346static void one_packet(dma_addr_t phys)
347{
348 struct crypt_ctl *crypt;
349 struct ixp_ctx *ctx;
350 int failed;
351 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
352
353 failed = phys & 0x1 ? -EBADMSG : 0;
354 phys &= ~0x3;
355 crypt = crypt_phys2virt(phys);
356
357 switch (crypt->ctl_flags & CTL_FLAG_MASK) {
358 case CTL_FLAG_PERFORM_AEAD: {
359 struct aead_request *req = crypt->data.aead_req;
360 struct aead_ctx *req_ctx = aead_request_ctx(req);
361 dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
362 DMA_TO_DEVICE);
363 dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
364 dma_unmap_sg(dev, req->src, req_ctx->src_nents,
365 DMA_BIDIRECTIONAL);
366
367 free_buf_chain(req_ctx->buffer, crypt->src_buf);
368 if (req_ctx->hmac_virt) {
369 finish_scattered_hmac(crypt);
370 }
371 req->base.complete(&req->base, failed);
372 break;
373 }
374 case CTL_FLAG_PERFORM_ABLK: {
375 struct ablkcipher_request *req = crypt->data.ablk_req;
376 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
377 int nents;
378 if (req_ctx->dst) {
379 nents = req_ctx->dst_nents;
380 dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
381 free_buf_chain(req_ctx->dst, crypt->dst_buf);
382 src_direction = DMA_TO_DEVICE;
383 }
384 nents = req_ctx->src_nents;
385 dma_unmap_sg(dev, req->src, nents, src_direction);
386 free_buf_chain(req_ctx->src, crypt->src_buf);
387 req->base.complete(&req->base, failed);
388 break;
389 }
390 case CTL_FLAG_GEN_ICV:
391 ctx = crypto_tfm_ctx(crypt->data.tfm);
392 dma_pool_free(ctx_pool, crypt->regist_ptr,
393 crypt->regist_buf->phys_addr);
394 dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
395 if (atomic_dec_and_test(&ctx->configuring))
396 complete(&ctx->completion);
397 break;
398 case CTL_FLAG_GEN_REVAES:
399 ctx = crypto_tfm_ctx(crypt->data.tfm);
400 *(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
401 if (atomic_dec_and_test(&ctx->configuring))
402 complete(&ctx->completion);
403 break;
404 default:
405 BUG();
406 }
407 crypt->ctl_flags = CTL_FLAG_UNUSED;
408}
409
410static void irqhandler(void *_unused)
411{
412 tasklet_schedule(&crypto_done_tasklet);
413}
414
415static void crypto_done_action(unsigned long arg)
416{
417 int i;
418
419 for(i=0; i<4; i++) {
420 dma_addr_t phys = qmgr_get_entry(RECV_QID);
421 if (!phys)
422 return;
423 one_packet(phys);
424 }
425 tasklet_schedule(&crypto_done_tasklet);
426}
427
428static int init_ixp_crypto(void)
429{
430 int ret = -ENODEV;
431
432 if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
433 IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
434 printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
435 return ret;
436 }
437 npe_c = npe_request(NPE_ID);
438 if (!npe_c)
439 return ret;
440
441 if (!npe_running(npe_c)) {
442 npe_load_firmware(npe_c, npe_name(npe_c), dev);
443 }
444
 445	/* buffer_pool is also sometimes used to store the hmac,
 446	 * so ensure it is large enough
447 */
448 BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
449 buffer_pool = dma_pool_create("buffer", dev,
450 sizeof(struct buffer_desc), 32, 0);
451 ret = -ENOMEM;
452 if (!buffer_pool) {
453 goto err;
454 }
455 ctx_pool = dma_pool_create("context", dev,
456 NPE_CTX_LEN, 16, 0);
457 if (!ctx_pool) {
458 goto err;
459 }
460 ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
461 if (ret)
462 goto err;
463 ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
464 if (ret) {
465 qmgr_release_queue(SEND_QID);
466 goto err;
467 }
468 qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
469 tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
470
471 qmgr_enable_irq(RECV_QID);
472 return 0;
473err:
474 if (ctx_pool)
475 dma_pool_destroy(ctx_pool);
476 if (buffer_pool)
477 dma_pool_destroy(buffer_pool);
478 npe_release(npe_c);
479 return ret;
480}
481
482static void release_ixp_crypto(void)
483{
484 qmgr_disable_irq(RECV_QID);
485 tasklet_kill(&crypto_done_tasklet);
486
487 qmgr_release_queue(SEND_QID);
488 qmgr_release_queue(RECV_QID);
489
490 dma_pool_destroy(ctx_pool);
491 dma_pool_destroy(buffer_pool);
492
493 npe_release(npe_c);
494
495 if (crypt_virt) {
496 dma_free_coherent(dev,
497 NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
498 crypt_virt, crypt_phys);
499 }
500 return;
501}
502
503static void reset_sa_dir(struct ix_sa_dir *dir)
504{
505 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
506 dir->npe_ctx_idx = 0;
507 dir->npe_mode = 0;
508}
509
510static int init_sa_dir(struct ix_sa_dir *dir)
511{
512 dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
513 if (!dir->npe_ctx) {
514 return -ENOMEM;
515 }
516 reset_sa_dir(dir);
517 return 0;
518}
519
520static void free_sa_dir(struct ix_sa_dir *dir)
521{
522 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
523 dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
524}
525
526static int init_tfm(struct crypto_tfm *tfm)
527{
528 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
529 int ret;
530
531 atomic_set(&ctx->configuring, 0);
532 ret = init_sa_dir(&ctx->encrypt);
533 if (ret)
534 return ret;
535 ret = init_sa_dir(&ctx->decrypt);
536 if (ret) {
537 free_sa_dir(&ctx->encrypt);
538 }
539 return ret;
540}
541
542static int init_tfm_ablk(struct crypto_tfm *tfm)
543{
544 tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
545 return init_tfm(tfm);
546}
547
548static int init_tfm_aead(struct crypto_tfm *tfm)
549{
550 tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
551 return init_tfm(tfm);
552}
553
554static void exit_tfm(struct crypto_tfm *tfm)
555{
556 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
557 free_sa_dir(&ctx->encrypt);
558 free_sa_dir(&ctx->decrypt);
559}
560
561static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
562 int init_len, u32 ctx_addr, const u8 *key, int key_len)
563{
564 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
565 struct crypt_ctl *crypt;
566 struct buffer_desc *buf;
567 int i;
568 u8 *pad;
569 u32 pad_phys, buf_phys;
570
571 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
572 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
573 if (!pad)
574 return -ENOMEM;
575 buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
576 if (!buf) {
577 dma_pool_free(ctx_pool, pad, pad_phys);
578 return -ENOMEM;
579 }
580 crypt = get_crypt_desc_emerg();
581 if (!crypt) {
582 dma_pool_free(ctx_pool, pad, pad_phys);
583 dma_pool_free(buffer_pool, buf, buf_phys);
584 return -EAGAIN;
585 }
586
587 memcpy(pad, key, key_len);
588 memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
589 for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
590 pad[i] ^= xpad;
591 }
592
593 crypt->data.tfm = tfm;
594 crypt->regist_ptr = pad;
595 crypt->regist_buf = buf;
596
597 crypt->auth_offs = 0;
598 crypt->auth_len = HMAC_PAD_BLOCKLEN;
599 crypt->crypto_ctx = ctx_addr;
600 crypt->src_buf = buf_phys;
601 crypt->icv_rev_aes = target;
602 crypt->mode = NPE_OP_HASH_GEN_ICV;
603 crypt->init_len = init_len;
604 crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
605
606 buf->next = 0;
607 buf->buf_len = HMAC_PAD_BLOCKLEN;
608 buf->pkt_len = 0;
609 buf->phys_addr = pad_phys;
610
611 atomic_inc(&ctx->configuring);
612 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
613 BUG_ON(qmgr_stat_overflow(SEND_QID));
614 return 0;
615}
616
617static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
618 const u8 *key, int key_len, unsigned digest_len)
619{
620 u32 itarget, otarget, npe_ctx_addr;
621 unsigned char *cinfo;
622 int init_len, ret = 0;
623 u32 cfgword;
624 struct ix_sa_dir *dir;
625 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
626 const struct ix_hash_algo *algo;
627
628 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
629 cinfo = dir->npe_ctx + dir->npe_ctx_idx;
630 algo = ix_hash(tfm);
631
632 /* write cfg word to cryptinfo */
633 cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
634 *(u32*)cinfo = cpu_to_be32(cfgword);
635 cinfo += sizeof(cfgword);
636
637 /* write ICV to cryptinfo */
638 memcpy(cinfo, algo->icv, digest_len);
639 cinfo += digest_len;
640
641 itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
642 + sizeof(algo->cfgword);
643 otarget = itarget + digest_len;
644 init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
645 npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
646
647 dir->npe_ctx_idx += init_len;
648 dir->npe_mode |= NPE_OP_HASH_ENABLE;
649
650 if (!encrypt)
651 dir->npe_mode |= NPE_OP_HASH_VERIFY;
652
653 ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
654 init_len, npe_ctx_addr, key, key_len);
655 if (ret)
656 return ret;
657 return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
658 init_len, npe_ctx_addr, key, key_len);
659}
660
661static int gen_rev_aes_key(struct crypto_tfm *tfm)
662{
663 struct crypt_ctl *crypt;
664 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
665 struct ix_sa_dir *dir = &ctx->decrypt;
666
667 crypt = get_crypt_desc_emerg();
668 if (!crypt) {
669 return -EAGAIN;
670 }
671 *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
672
673 crypt->data.tfm = tfm;
674 crypt->crypt_offs = 0;
675 crypt->crypt_len = AES_BLOCK128;
676 crypt->src_buf = 0;
677 crypt->crypto_ctx = dir->npe_ctx_phys;
678 crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
679 crypt->mode = NPE_OP_ENC_GEN_KEY;
680 crypt->init_len = dir->npe_ctx_idx;
681 crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
682
683 atomic_inc(&ctx->configuring);
684 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
685 BUG_ON(qmgr_stat_overflow(SEND_QID));
686 return 0;
687}
688
689static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
690 const u8 *key, int key_len)
691{
692 u8 *cinfo;
693 u32 cipher_cfg;
694 u32 keylen_cfg = 0;
695 struct ix_sa_dir *dir;
696 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
697 u32 *flags = &tfm->crt_flags;
698
699 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
700 cinfo = dir->npe_ctx;
701
702 if (encrypt) {
703 cipher_cfg = cipher_cfg_enc(tfm);
704 dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
705 } else {
706 cipher_cfg = cipher_cfg_dec(tfm);
707 }
708 if (cipher_cfg & MOD_AES) {
709 switch (key_len) {
710 case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
711 case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
712 case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
713 default:
714 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
715 return -EINVAL;
716 }
717 cipher_cfg |= keylen_cfg;
718 } else if (cipher_cfg & MOD_3DES) {
719 const u32 *K = (const u32 *)key;
720 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
721 !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
722 {
723 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
724 return -EINVAL;
725 }
726 } else {
727 u32 tmp[DES_EXPKEY_WORDS];
728 if (des_ekey(tmp, key) == 0) {
729 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
730 }
731 }
732 /* write cfg word to cryptinfo */
733 *(u32*)cinfo = cpu_to_be32(cipher_cfg);
734 cinfo += sizeof(cipher_cfg);
735
736 /* write cipher key to cryptinfo */
737 memcpy(cinfo, key, key_len);
738 /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
739 if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
740 memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
741 key_len = DES3_EDE_KEY_SIZE;
742 }
743 dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
744 dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
745 if ((cipher_cfg & MOD_AES) && !encrypt) {
746 return gen_rev_aes_key(tfm);
747 }
748 return 0;
749}
750
751static int count_sg(struct scatterlist *sg, int nbytes)
752{
753 int i;
754 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
755 nbytes -= sg->length;
756 return i;
757}
758
759static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
760 unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
761{
762 int nents = 0;
763
764 while (nbytes > 0) {
765 struct buffer_desc *next_buf;
766 u32 next_buf_phys;
767 unsigned len = min(nbytes, sg_dma_len(sg));
768
769 nents++;
770 nbytes -= len;
771 if (!buf->phys_addr) {
772 buf->phys_addr = sg_dma_address(sg);
773 buf->buf_len = len;
774 buf->next = NULL;
775 buf->phys_next = 0;
776 goto next;
777 }
778 /* Two consecutive chunks on one page may be handled by the old
779 * buffer descriptor, increased by the length of the new one
780 */
781 if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
782 buf->buf_len += len;
783 goto next;
784 }
785 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
786 if (!next_buf)
787 return NULL;
788 buf->next = next_buf;
789 buf->phys_next = next_buf_phys;
790
791 buf = next_buf;
792 buf->next = NULL;
793 buf->phys_next = 0;
794 buf->phys_addr = sg_dma_address(sg);
795 buf->buf_len = len;
796next:
797 if (nbytes > 0) {
798 sg = sg_next(sg);
799 }
800 }
801 return buf;
802}
803
804static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
805 unsigned int key_len)
806{
807 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
808 u32 *flags = &tfm->base.crt_flags;
809 int ret;
810
811 init_completion(&ctx->completion);
812 atomic_inc(&ctx->configuring);
813
814 reset_sa_dir(&ctx->encrypt);
815 reset_sa_dir(&ctx->decrypt);
816
817 ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
818 ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
819
820 ret = setup_cipher(&tfm->base, 0, key, key_len);
821 if (ret)
822 goto out;
823 ret = setup_cipher(&tfm->base, 1, key, key_len);
824 if (ret)
825 goto out;
826
827 if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
828 if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
829 ret = -EINVAL;
830 } else {
831 *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
832 }
833 }
834out:
835 if (!atomic_dec_and_test(&ctx->configuring))
836 wait_for_completion(&ctx->completion);
837 return ret;
838}
839
840static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
841 unsigned int key_len)
842{
843 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
844
845 /* the nonce is stored in bytes at end of key */
846 if (key_len < CTR_RFC3686_NONCE_SIZE)
847 return -EINVAL;
848
849 memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
850 CTR_RFC3686_NONCE_SIZE);
851
852 key_len -= CTR_RFC3686_NONCE_SIZE;
853 return ablk_setkey(tfm, key, key_len);
854}
855
856static int ablk_perform(struct ablkcipher_request *req, int encrypt)
857{
858 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
859 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
860 unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
861 int ret = -ENOMEM;
862 struct ix_sa_dir *dir;
863 struct crypt_ctl *crypt;
864 unsigned int nbytes = req->nbytes, nents;
865 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
866 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
867 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
868 GFP_KERNEL : GFP_ATOMIC;
869
870 if (qmgr_stat_full(SEND_QID))
871 return -EAGAIN;
872 if (atomic_read(&ctx->configuring))
873 return -EAGAIN;
874
875 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
876
877 crypt = get_crypt_desc();
878 if (!crypt)
879 return ret;
880
881 crypt->data.ablk_req = req;
882 crypt->crypto_ctx = dir->npe_ctx_phys;
883 crypt->mode = dir->npe_mode;
884 crypt->init_len = dir->npe_ctx_idx;
885
886 crypt->crypt_offs = 0;
887 crypt->crypt_len = nbytes;
888
889 BUG_ON(ivsize && !req->info);
890 memcpy(crypt->iv, req->info, ivsize);
891 if (req->src != req->dst) {
892 crypt->mode |= NPE_OP_NOT_IN_PLACE;
893 nents = count_sg(req->dst, nbytes);
894 /* This was never tested by Intel
895 * for more than one dst buffer, I think. */
896 BUG_ON(nents != 1);
897 req_ctx->dst_nents = nents;
898 dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
899 req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
900 if (!req_ctx->dst)
901 goto unmap_sg_dest;
902 req_ctx->dst->phys_addr = 0;
903 if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
904 goto free_buf_dest;
905 src_direction = DMA_TO_DEVICE;
906 } else {
907 req_ctx->dst = NULL;
908 req_ctx->dst_nents = 0;
909 }
910 nents = count_sg(req->src, nbytes);
911 req_ctx->src_nents = nents;
912 dma_map_sg(dev, req->src, nents, src_direction);
913
914 req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
915 if (!req_ctx->src)
916 goto unmap_sg_src;
917 req_ctx->src->phys_addr = 0;
918 if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
919 goto free_buf_src;
920
921 crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
922 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
923 BUG_ON(qmgr_stat_overflow(SEND_QID));
924 return -EINPROGRESS;
925
926free_buf_src:
927 free_buf_chain(req_ctx->src, crypt->src_buf);
928unmap_sg_src:
929 dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
930free_buf_dest:
931 if (req->src != req->dst) {
932 free_buf_chain(req_ctx->dst, crypt->dst_buf);
933unmap_sg_dest:
934 dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
935 DMA_FROM_DEVICE);
936 }
937 crypt->ctl_flags = CTL_FLAG_UNUSED;
938 return ret;
939}
940
941static int ablk_encrypt(struct ablkcipher_request *req)
942{
943 return ablk_perform(req, 1);
944}
945
946static int ablk_decrypt(struct ablkcipher_request *req)
947{
948 return ablk_perform(req, 0);
949}
950
951static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
952{
953 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
954 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
955 u8 iv[CTR_RFC3686_BLOCK_SIZE];
956 u8 *info = req->info;
957 int ret;
958
959 /* set up counter block */
960 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
961 memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
962
963 /* initialize counter portion of counter block */
964 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
965 cpu_to_be32(1);
966
967 req->info = iv;
968 ret = ablk_perform(req, 1);
969 req->info = info;
970 return ret;
971}
972
973static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
974 unsigned int nbytes)
975{
976 int offset = 0;
977
978 if (!nbytes)
979 return 0;
980
981 for (;;) {
982 if (start < offset + sg->length)
983 break;
984
985 offset += sg->length;
986 sg = sg_next(sg);
987 }
988 return (start + nbytes > offset + sg->length);
989}
990
991static int aead_perform(struct aead_request *req, int encrypt,
992 int cryptoffset, int eff_cryptlen, u8 *iv)
993{
994 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
995 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
996 unsigned ivsize = crypto_aead_ivsize(tfm);
997 unsigned authsize = crypto_aead_authsize(tfm);
998 int ret = -ENOMEM;
999 struct ix_sa_dir *dir;
1000 struct crypt_ctl *crypt;
1001 unsigned int cryptlen, nents;
1002 struct buffer_desc *buf;
1003 struct aead_ctx *req_ctx = aead_request_ctx(req);
1004 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1005 GFP_KERNEL : GFP_ATOMIC;
1006
1007 if (qmgr_stat_full(SEND_QID))
1008 return -EAGAIN;
1009 if (atomic_read(&ctx->configuring))
1010 return -EAGAIN;
1011
1012 if (encrypt) {
1013 dir = &ctx->encrypt;
1014 cryptlen = req->cryptlen;
1015 } else {
1016 dir = &ctx->decrypt;
1017 /* req->cryptlen includes the authsize when decrypting */
1018 cryptlen = req->cryptlen -authsize;
1019 eff_cryptlen -= authsize;
1020 }
1021 crypt = get_crypt_desc();
1022 if (!crypt)
1023 return ret;
1024
1025 crypt->data.aead_req = req;
1026 crypt->crypto_ctx = dir->npe_ctx_phys;
1027 crypt->mode = dir->npe_mode;
1028 crypt->init_len = dir->npe_ctx_idx;
1029
1030 crypt->crypt_offs = cryptoffset;
1031 crypt->crypt_len = eff_cryptlen;
1032
1033 crypt->auth_offs = 0;
1034 crypt->auth_len = req->assoclen + ivsize + cryptlen;
1035 BUG_ON(ivsize && !req->iv);
1036 memcpy(crypt->iv, req->iv, ivsize);
1037
1038 if (req->src != req->dst) {
 1039		BUG(); /* -ENOTSUP because of my laziness */
1040 }
1041
1042 req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
1043 if (!req_ctx->buffer)
1044 goto out;
1045 req_ctx->buffer->phys_addr = 0;
1046 /* ASSOC data */
1047 nents = count_sg(req->assoc, req->assoclen);
1048 req_ctx->assoc_nents = nents;
1049 dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
1050 buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
1051 if (!buf)
1052 goto unmap_sg_assoc;
1053 /* IV */
1054 sg_init_table(&req_ctx->ivlist, 1);
1055 sg_set_buf(&req_ctx->ivlist, iv, ivsize);
1056 dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
1057 buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
1058 if (!buf)
1059 goto unmap_sg_iv;
1060 if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
1061 /* The 12 hmac bytes are scattered,
1062 * we need to copy them into a safe buffer */
1063 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
1064 &crypt->icv_rev_aes);
1065 if (unlikely(!req_ctx->hmac_virt))
1066 goto unmap_sg_iv;
1067 if (!encrypt) {
1068 scatterwalk_map_and_copy(req_ctx->hmac_virt,
1069 req->src, cryptlen, authsize, 0);
1070 }
1071 req_ctx->encrypt = encrypt;
1072 } else {
1073 req_ctx->hmac_virt = NULL;
1074 }
1075 /* Crypt */
1076 nents = count_sg(req->src, cryptlen + authsize);
1077 req_ctx->src_nents = nents;
1078 dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
1079 buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
1080 if (!buf)
1081 goto unmap_sg_src;
1082 if (!req_ctx->hmac_virt) {
1083 crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
1084 }
1085 crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1086 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
1087 BUG_ON(qmgr_stat_overflow(SEND_QID));
1088 return -EINPROGRESS;
1089unmap_sg_src:
1090 dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
1091 if (req_ctx->hmac_virt) {
1092 dma_pool_free(buffer_pool, req_ctx->hmac_virt,
1093 crypt->icv_rev_aes);
1094 }
1095unmap_sg_iv:
1096 dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
1097unmap_sg_assoc:
1098 dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
1099 free_buf_chain(req_ctx->buffer, crypt->src_buf);
1100out:
1101 crypt->ctl_flags = CTL_FLAG_UNUSED;
1102 return ret;
1103}
1104
1105static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1106{
1107 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1108 u32 *flags = &tfm->base.crt_flags;
1109 unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
1110 int ret;
1111
1112 if (!ctx->enckey_len && !ctx->authkey_len)
1113 return 0;
1114 init_completion(&ctx->completion);
1115 atomic_inc(&ctx->configuring);
1116
1117 reset_sa_dir(&ctx->encrypt);
1118 reset_sa_dir(&ctx->decrypt);
1119
1120 ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1121 if (ret)
1122 goto out;
1123 ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1124 if (ret)
1125 goto out;
1126 ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1127 ctx->authkey_len, digest_len);
1128 if (ret)
1129 goto out;
1130 ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
1131 ctx->authkey_len, digest_len);
1132 if (ret)
1133 goto out;
1134
1135 if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
1136 if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
1137 ret = -EINVAL;
1138 goto out;
1139 } else {
1140 *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
1141 }
1142 }
1143out:
1144 if (!atomic_dec_and_test(&ctx->configuring))
1145 wait_for_completion(&ctx->completion);
1146 return ret;
1147}
1148
1149static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1150{
1151 int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
1152
1153 if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
1154 return -EINVAL;
1155 return aead_setup(tfm, authsize);
1156}
1157
1158static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1159 unsigned int keylen)
1160{
1161 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1162 struct rtattr *rta = (struct rtattr *)key;
1163 struct crypto_authenc_key_param *param;
1164
1165 if (!RTA_OK(rta, keylen))
1166 goto badkey;
1167 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1168 goto badkey;
1169 if (RTA_PAYLOAD(rta) < sizeof(*param))
1170 goto badkey;
1171
1172 param = RTA_DATA(rta);
1173 ctx->enckey_len = be32_to_cpu(param->enckeylen);
1174
1175 key += RTA_ALIGN(rta->rta_len);
1176 keylen -= RTA_ALIGN(rta->rta_len);
1177
1178 if (keylen < ctx->enckey_len)
1179 goto badkey;
1180
1181 ctx->authkey_len = keylen - ctx->enckey_len;
1182 memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
1183 memcpy(ctx->authkey, key, ctx->authkey_len);
1184
1185 return aead_setup(tfm, crypto_aead_authsize(tfm));
1186badkey:
1187 ctx->enckey_len = 0;
1188 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1189 return -EINVAL;
1190}
1191
1192static int aead_encrypt(struct aead_request *req)
1193{
1194 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1195 return aead_perform(req, 1, req->assoclen + ivsize,
1196 req->cryptlen, req->iv);
1197}
1198
1199static int aead_decrypt(struct aead_request *req)
1200{
1201 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1202 return aead_perform(req, 0, req->assoclen + ivsize,
1203 req->cryptlen, req->iv);
1204}
1205
1206static int aead_givencrypt(struct aead_givcrypt_request *req)
1207{
1208 struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
1209 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1210 unsigned len, ivsize = crypto_aead_ivsize(tfm);
1211 __be64 seq;
1212
1213 /* copied from eseqiv.c */
1214 if (!ctx->salted) {
1215 get_random_bytes(ctx->salt, ivsize);
1216 ctx->salted = 1;
1217 }
1218 memcpy(req->areq.iv, ctx->salt, ivsize);
1219 len = ivsize;
1220 if (ivsize > sizeof(u64)) {
1221 memset(req->giv, 0, ivsize - sizeof(u64));
1222 len = sizeof(u64);
1223 }
1224 seq = cpu_to_be64(req->seq);
1225 memcpy(req->giv + ivsize - len, &seq, len);
1226 return aead_perform(&req->areq, 1, req->areq.assoclen,
1227 req->areq.cryptlen +ivsize, req->giv);
1228}
1229
1230static struct ixp_alg ixp4xx_algos[] = {
1231{
1232 .crypto = {
1233 .cra_name = "cbc(des)",
1234 .cra_blocksize = DES_BLOCK_SIZE,
1235 .cra_u = { .ablkcipher = {
1236 .min_keysize = DES_KEY_SIZE,
1237 .max_keysize = DES_KEY_SIZE,
1238 .ivsize = DES_BLOCK_SIZE,
1239 .geniv = "eseqiv",
1240 }
1241 }
1242 },
1243 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1244 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1245
1246}, {
1247 .crypto = {
1248 .cra_name = "ecb(des)",
1249 .cra_blocksize = DES_BLOCK_SIZE,
1250 .cra_u = { .ablkcipher = {
1251 .min_keysize = DES_KEY_SIZE,
1252 .max_keysize = DES_KEY_SIZE,
1253 }
1254 }
1255 },
1256 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1257 .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1258}, {
1259 .crypto = {
1260 .cra_name = "cbc(des3_ede)",
1261 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1262 .cra_u = { .ablkcipher = {
1263 .min_keysize = DES3_EDE_KEY_SIZE,
1264 .max_keysize = DES3_EDE_KEY_SIZE,
1265 .ivsize = DES3_EDE_BLOCK_SIZE,
1266 .geniv = "eseqiv",
1267 }
1268 }
1269 },
1270 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1271 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1272}, {
1273 .crypto = {
1274 .cra_name = "ecb(des3_ede)",
1275 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1276 .cra_u = { .ablkcipher = {
1277 .min_keysize = DES3_EDE_KEY_SIZE,
1278 .max_keysize = DES3_EDE_KEY_SIZE,
1279 }
1280 }
1281 },
1282 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1283 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1284}, {
1285 .crypto = {
1286 .cra_name = "cbc(aes)",
1287 .cra_blocksize = AES_BLOCK_SIZE,
1288 .cra_u = { .ablkcipher = {
1289 .min_keysize = AES_MIN_KEY_SIZE,
1290 .max_keysize = AES_MAX_KEY_SIZE,
1291 .ivsize = AES_BLOCK_SIZE,
1292 .geniv = "eseqiv",
1293 }
1294 }
1295 },
1296 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1297 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1298}, {
1299 .crypto = {
1300 .cra_name = "ecb(aes)",
1301 .cra_blocksize = AES_BLOCK_SIZE,
1302 .cra_u = { .ablkcipher = {
1303 .min_keysize = AES_MIN_KEY_SIZE,
1304 .max_keysize = AES_MAX_KEY_SIZE,
1305 }
1306 }
1307 },
1308 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1309 .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1310}, {
1311 .crypto = {
1312 .cra_name = "ctr(aes)",
1313 .cra_blocksize = AES_BLOCK_SIZE,
1314 .cra_u = { .ablkcipher = {
1315 .min_keysize = AES_MIN_KEY_SIZE,
1316 .max_keysize = AES_MAX_KEY_SIZE,
1317 .ivsize = AES_BLOCK_SIZE,
1318 .geniv = "eseqiv",
1319 }
1320 }
1321 },
1322 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1323 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1324}, {
1325 .crypto = {
1326 .cra_name = "rfc3686(ctr(aes))",
1327 .cra_blocksize = AES_BLOCK_SIZE,
1328 .cra_u = { .ablkcipher = {
1329 .min_keysize = AES_MIN_KEY_SIZE,
1330 .max_keysize = AES_MAX_KEY_SIZE,
1331 .ivsize = AES_BLOCK_SIZE,
1332 .geniv = "eseqiv",
1333 .setkey = ablk_rfc3686_setkey,
1334 .encrypt = ablk_rfc3686_crypt,
1335 .decrypt = ablk_rfc3686_crypt }
1336 }
1337 },
1338 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1339 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1340}, {
1341 .crypto = {
1342 .cra_name = "authenc(hmac(md5),cbc(des))",
1343 .cra_blocksize = DES_BLOCK_SIZE,
1344 .cra_u = { .aead = {
1345 .ivsize = DES_BLOCK_SIZE,
1346 .maxauthsize = MD5_DIGEST_SIZE,
1347 }
1348 }
1349 },
1350 .hash = &hash_alg_md5,
1351 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1352 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1353}, {
1354 .crypto = {
1355 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1356 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1357 .cra_u = { .aead = {
1358 .ivsize = DES3_EDE_BLOCK_SIZE,
1359 .maxauthsize = MD5_DIGEST_SIZE,
1360 }
1361 }
1362 },
1363 .hash = &hash_alg_md5,
1364 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1365 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1366}, {
1367 .crypto = {
1368 .cra_name = "authenc(hmac(sha1),cbc(des))",
1369 .cra_blocksize = DES_BLOCK_SIZE,
1370 .cra_u = { .aead = {
1371 .ivsize = DES_BLOCK_SIZE,
1372 .maxauthsize = SHA1_DIGEST_SIZE,
1373 }
1374 }
1375 },
1376 .hash = &hash_alg_sha1,
1377 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1378 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1379}, {
1380 .crypto = {
1381 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1382 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1383 .cra_u = { .aead = {
1384 .ivsize = DES3_EDE_BLOCK_SIZE,
1385 .maxauthsize = SHA1_DIGEST_SIZE,
1386 }
1387 }
1388 },
1389 .hash = &hash_alg_sha1,
1390 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1391 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1392}, {
1393 .crypto = {
1394 .cra_name = "authenc(hmac(md5),cbc(aes))",
1395 .cra_blocksize = AES_BLOCK_SIZE,
1396 .cra_u = { .aead = {
1397 .ivsize = AES_BLOCK_SIZE,
1398 .maxauthsize = MD5_DIGEST_SIZE,
1399 }
1400 }
1401 },
1402 .hash = &hash_alg_md5,
1403 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1404 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1405}, {
1406 .crypto = {
1407 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1408 .cra_blocksize = AES_BLOCK_SIZE,
1409 .cra_u = { .aead = {
1410 .ivsize = AES_BLOCK_SIZE,
1411 .maxauthsize = SHA1_DIGEST_SIZE,
1412 }
1413 }
1414 },
1415 .hash = &hash_alg_sha1,
1416 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1417 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1418} };
1419
1420#define IXP_POSTFIX "-ixp4xx"
1421static int __init ixp_module_init(void)
1422{
1423 int num = ARRAY_SIZE(ixp4xx_algos);
1424 int i, err;
1425
1426 if (platform_device_register(&pseudo_dev))
1427 return -ENODEV;
1428
1429 spin_lock_init(&desc_lock);
1430 spin_lock_init(&emerg_lock);
1431
1432 err = init_ixp_crypto();
1433 if (err) {
1434 platform_device_unregister(&pseudo_dev);
1435 return err;
1436 }
1437 for (i = 0; i < num; i++) {
1438 struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
1439
1440 if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
1441 "%s"IXP_POSTFIX, cra->cra_name) >=
1442 CRYPTO_MAX_ALG_NAME)
1443 {
1444 continue;
1445 }
1446 if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
1447 continue;
1448 }
1449 if (!ixp4xx_algos[i].hash) {
1450 /* block ciphers */
1451 cra->cra_type = &crypto_ablkcipher_type;
1452 cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1453 CRYPTO_ALG_ASYNC;
1454 if (!cra->cra_ablkcipher.setkey)
1455 cra->cra_ablkcipher.setkey = ablk_setkey;
1456 if (!cra->cra_ablkcipher.encrypt)
1457 cra->cra_ablkcipher.encrypt = ablk_encrypt;
1458 if (!cra->cra_ablkcipher.decrypt)
1459 cra->cra_ablkcipher.decrypt = ablk_decrypt;
1460 cra->cra_init = init_tfm_ablk;
1461 } else {
1462 /* authenc */
1463 cra->cra_type = &crypto_aead_type;
1464 cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
1465 CRYPTO_ALG_ASYNC;
1466 cra->cra_aead.setkey = aead_setkey;
1467 cra->cra_aead.setauthsize = aead_setauthsize;
1468 cra->cra_aead.encrypt = aead_encrypt;
1469 cra->cra_aead.decrypt = aead_decrypt;
1470 cra->cra_aead.givencrypt = aead_givencrypt;
1471 cra->cra_init = init_tfm_aead;
1472 }
1473 cra->cra_ctxsize = sizeof(struct ixp_ctx);
1474 cra->cra_module = THIS_MODULE;
1475 cra->cra_alignmask = 3;
1476 cra->cra_priority = 300;
1477 cra->cra_exit = exit_tfm;
1478 if (crypto_register_alg(cra))
1479 printk(KERN_ERR "Failed to register '%s'\n",
1480 cra->cra_name);
1481 else
1482 ixp4xx_algos[i].registered = 1;
1483 }
1484 return 0;
1485}
1486
1487static void __exit ixp_module_exit(void)
1488{
1489 int num = ARRAY_SIZE(ixp4xx_algos);
1490 int i;
1491
1492 for (i = 0; i < num; i++) {
1493 if (ixp4xx_algos[i].registered)
1494 crypto_unregister_alg(&ixp4xx_algos[i].crypto);
1495 }
1496 release_ixp_crypto();
1497 platform_device_unregister(&pseudo_dev);
1498}
1499
1500module_init(ixp_module_init);
1501module_exit(ixp_module_exit);
1502
1503MODULE_LICENSE("GPL");
1504MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1505MODULE_DESCRIPTION("IXP4xx hardware crypto");
1506
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index bb30eb9b93ef..54a2a166e566 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -385,12 +385,12 @@ static int __init padlock_init(void)
 	int ret;
 
 	if (!cpu_has_xcrypt) {
-		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
+		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
 		return -ENODEV;
 	}
 
 	if (!cpu_has_xcrypt_enabled) {
-		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index c666b4e0933e..40d5680fa013 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -254,12 +254,12 @@ static int __init padlock_init(void)
 	int rc = -ENODEV;
 
 	if (!cpu_has_phe) {
-		printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
+		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
 		return -ENODEV;
 	}
 
 	if (!cpu_has_phe_enabled) {
-		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
new file mode 100644
index 000000000000..b11943dadefd
--- /dev/null
+++ b/drivers/crypto/talitos.c
@@ -0,0 +1,1597 @@
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
35#include <linux/of_platform.h>
36#include <linux/dma-mapping.h>
37#include <linux/io.h>
38#include <linux/spinlock.h>
39#include <linux/rtnetlink.h>
40
41#include <crypto/algapi.h>
42#include <crypto/aes.h>
43#include <crypto/des.h>
44#include <crypto/sha.h>
45#include <crypto/aead.h>
46#include <crypto/authenc.h>
47
48#include "talitos.h"
49
50#define TALITOS_TIMEOUT 100000
51#define TALITOS_MAX_DATA_LEN 65535
52
53#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
54#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
55#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
56
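After the byte swap these pull out, respectively, bits 3-7 (the descriptor type), bits 28-31 (the primary execution unit) and bits 16-19 (the secondary execution unit) of the header. hw_supports() below turns each field into a single bit and tests it against the capability masks read from the device tree, roughly:

	/* illustrative only */
	ok = (1 << DESC_TYPE(hdr)) & priv->desc_types;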
57/* descriptor pointer entry */
58struct talitos_ptr {
59 __be16 len; /* length */
60 u8 j_extent; /* jump to sg link table and/or extent */
61 u8 eptr; /* extended address */
62 __be32 ptr; /* address */
63};
64
65/* descriptor */
66struct talitos_desc {
67 __be32 hdr; /* header high bits */
68 __be32 hdr_lo; /* header low bits */
69 struct talitos_ptr ptr[7]; /* ptr/len pair array */
70};
71
72/**
73 * talitos_request - descriptor submission request
74 * @desc: descriptor pointer (kernel virtual)
75 * @dma_desc: descriptor's physical bus address
76 * @callback: whom to call when descriptor processing is done
77 * @context: caller context (optional)
78 */
79struct talitos_request {
80 struct talitos_desc *desc;
81 dma_addr_t dma_desc;
82 void (*callback) (struct device *dev, struct talitos_desc *desc,
83 void *context, int error);
84 void *context;
85};
86
87struct talitos_private {
88 struct device *dev;
89 struct of_device *ofdev;
90 void __iomem *reg;
91 int irq;
92
93 /* SEC version geometry (from device tree node) */
94 unsigned int num_channels;
95 unsigned int chfifo_len;
96 unsigned int exec_units;
97 unsigned int desc_types;
98
99 /* next channel to be assigned the next incoming descriptor */
100 atomic_t last_chan;
101
102 /* per-channel request fifo */
103 struct talitos_request **fifo;
104
105 /*
106 * length of the request fifo
107 * fifo_len is chfifo_len rounded up to next power of 2
108 * so we can use bitwise ops to wrap
109 */
110 unsigned int fifo_len;
111
112 /* per-channel index to next free descriptor request */
113 int *head;
114
115 /* per-channel index to next in-progress/done descriptor request */
116 int *tail;
117
118 /* per-channel request submission (head) and release (tail) locks */
119 spinlock_t *head_lock;
120 spinlock_t *tail_lock;
121
122 /* request callback tasklet */
123 struct tasklet_struct done_task;
124 struct tasklet_struct error_task;
125
126 /* list of registered algorithms */
127 struct list_head alg_list;
128
129 /* hwrng device */
130 struct hwrng rng;
131};
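A note on fifo_len: rounding chfifo_len up to a power of two lets the head/tail indices wrap with a mask instead of a modulo, which is exactly what talitos_submit() and flush_channel() do:

	/* sketch, assuming fifo_len is a power of 2 */
	head = (head + 1) & (fifo_len - 1);	/* same as head % fifo_len */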
132
133/*
134 * map virtual single (contiguous) pointer to h/w descriptor pointer
135 */
136static void map_single_talitos_ptr(struct device *dev,
137 struct talitos_ptr *talitos_ptr,
138 unsigned short len, void *data,
139 unsigned char extent,
140 enum dma_data_direction dir)
141{
142 talitos_ptr->len = cpu_to_be16(len);
143 talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
144 talitos_ptr->j_extent = extent;
145}
146
147/*
148 * unmap bus single (contiguous) h/w descriptor pointer
149 */
150static void unmap_single_talitos_ptr(struct device *dev,
151 struct talitos_ptr *talitos_ptr,
152 enum dma_data_direction dir)
153{
154 dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
155 be16_to_cpu(talitos_ptr->len), dir);
156}
157
158static int reset_channel(struct device *dev, int ch)
159{
160 struct talitos_private *priv = dev_get_drvdata(dev);
161 unsigned int timeout = TALITOS_TIMEOUT;
162
163 setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
164
165 while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
166 && --timeout)
167 cpu_relax();
168
169 if (timeout == 0) {
170 dev_err(dev, "failed to reset channel %d\n", ch);
171 return -EIO;
172 }
173
174 /* set done writeback and IRQ */
175 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
176 TALITOS_CCCR_LO_CDIE);
177
178 return 0;
179}
180
181static int reset_device(struct device *dev)
182{
183 struct talitos_private *priv = dev_get_drvdata(dev);
184 unsigned int timeout = TALITOS_TIMEOUT;
185
186 setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
187
188 while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
189 && --timeout)
190 cpu_relax();
191
192 if (timeout == 0) {
193 dev_err(dev, "failed to reset device\n");
194 return -EIO;
195 }
196
197 return 0;
198}
199
200/*
201 * Reset and initialize the device
202 */
203static int init_device(struct device *dev)
204{
205 struct talitos_private *priv = dev_get_drvdata(dev);
206 int ch, err;
207
208 /*
209 * Master reset
210 * errata documentation: warning: certain SEC interrupts
211 * are not fully cleared by writing the MCR:SWR bit,
212 * set bit twice to completely reset
213 */
214 err = reset_device(dev);
215 if (err)
216 return err;
217
218 err = reset_device(dev);
219 if (err)
220 return err;
221
222 /* reset channels */
223 for (ch = 0; ch < priv->num_channels; ch++) {
224 err = reset_channel(dev, ch);
225 if (err)
226 return err;
227 }
228
229 /* enable channel done and error interrupts */
230 setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
231 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
232
233 return 0;
234}
235
236/**
237 * talitos_submit - submits a descriptor to the device for processing
238 * @dev: the SEC device to be used
239 * @desc: the descriptor to be processed by the device
240 * @callback: whom to call when processing is complete
241 * @context: a handle for use by caller (optional)
242 *
243 * desc must contain valid dma-mapped (bus physical) address pointers.
244 * callback must check err and feedback in descriptor header
245 * for device processing status.
246 */
247static int talitos_submit(struct device *dev, struct talitos_desc *desc,
248 void (*callback)(struct device *dev,
249 struct talitos_desc *desc,
250 void *context, int error),
251 void *context)
252{
253 struct talitos_private *priv = dev_get_drvdata(dev);
254 struct talitos_request *request;
255 unsigned long flags, ch;
256 int head;
257
258 /* select done notification */
259 desc->hdr |= DESC_HDR_DONE_NOTIFY;
260
261 /* emulate SEC's round-robin channel fifo polling scheme */
262 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
263
264 spin_lock_irqsave(&priv->head_lock[ch], flags);
265
266 head = priv->head[ch];
267 request = &priv->fifo[ch][head];
268
269 if (request->desc) {
270 /* request queue is full */
271 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
272 return -EAGAIN;
273 }
274
275 /* map descriptor and save caller data */
276 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
277 DMA_BIDIRECTIONAL);
278 request->callback = callback;
279 request->context = context;
280
281 /* increment fifo head */
282 priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
283
284 smp_wmb();
285 request->desc = desc;
286
287 /* GO! */
288 wmb();
289 out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
290
291 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
292
293 return -EINPROGRESS;
294}
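A hedged usage sketch (the caller-side names are hypothetical): clients submit a dma-mapped descriptor and treat -EINPROGRESS as success, while -EAGAIN signals that the selected channel fifo is full and the request must be retried or queued by the caller:

	ret = talitos_submit(dev, &edesc->desc, my_done_callback, areq);
	if (ret != -EINPROGRESS)
		my_handle_busy(ret);	/* e.g. -EAGAIN: fifo full */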
295
296/*
297 * process completed descriptors, passing the error (if any) to requests that did not finish
298 */
299static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
300{
301 struct talitos_private *priv = dev_get_drvdata(dev);
302 struct talitos_request *request, saved_req;
303 unsigned long flags;
304 int tail, status;
305
306 spin_lock_irqsave(&priv->tail_lock[ch], flags);
307
308 tail = priv->tail[ch];
309 while (priv->fifo[ch][tail].desc) {
310 request = &priv->fifo[ch][tail];
311
312 /* descriptors with their done bits set don't get the error */
313 rmb();
314 if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
315 status = 0;
316 else
317 if (!error)
318 break;
319 else
320 status = error;
321
322 dma_unmap_single(dev, request->dma_desc,
323 sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
324
325 /* copy entries so we can call callback outside lock */
326 saved_req.desc = request->desc;
327 saved_req.callback = request->callback;
328 saved_req.context = request->context;
329
330 /* release request entry in fifo */
331 smp_wmb();
332 request->desc = NULL;
333
334 /* increment fifo tail */
335 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
336
337 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
338 saved_req.callback(dev, saved_req.desc, saved_req.context,
339 status);
340 /* channel may resume processing in single desc error case */
341 if (error && !reset_ch && status == error)
342 return;
343 spin_lock_irqsave(&priv->tail_lock[ch], flags);
344 tail = priv->tail[ch];
345 }
346
347 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
348}
349
350/*
351 * process completed requests for channels that have done status
352 */
353static void talitos_done(unsigned long data)
354{
355 struct device *dev = (struct device *)data;
356 struct talitos_private *priv = dev_get_drvdata(dev);
357 int ch;
358
359 for (ch = 0; ch < priv->num_channels; ch++)
360 flush_channel(dev, ch, 0, 0);
361}
362
363/*
364 * locate current (offending) descriptor
365 */
366static struct talitos_desc *current_desc(struct device *dev, int ch)
367{
368 struct talitos_private *priv = dev_get_drvdata(dev);
369 int tail = priv->tail[ch];
370 dma_addr_t cur_desc;
371
372 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
373
374 while (priv->fifo[ch][tail].dma_desc != cur_desc) {
375 tail = (tail + 1) & (priv->fifo_len - 1);
376 if (tail == priv->tail[ch]) {
377 dev_err(dev, "couldn't locate current descriptor\n");
378 return NULL;
379 }
380 }
381
382 return priv->fifo[ch][tail].desc;
383}
384
385/*
386 * user diagnostics; report root cause of error based on execution unit status
387 */
388static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
389{
390 struct talitos_private *priv = dev_get_drvdata(dev);
391 int i;
392
393 switch (desc->hdr & DESC_HDR_SEL0_MASK) {
394 case DESC_HDR_SEL0_AFEU:
395 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
396 in_be32(priv->reg + TALITOS_AFEUISR),
397 in_be32(priv->reg + TALITOS_AFEUISR_LO));
398 break;
399 case DESC_HDR_SEL0_DEU:
400 dev_err(dev, "DEUISR 0x%08x_%08x\n",
401 in_be32(priv->reg + TALITOS_DEUISR),
402 in_be32(priv->reg + TALITOS_DEUISR_LO));
403 break;
404 case DESC_HDR_SEL0_MDEUA:
405 case DESC_HDR_SEL0_MDEUB:
406 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
407 in_be32(priv->reg + TALITOS_MDEUISR),
408 in_be32(priv->reg + TALITOS_MDEUISR_LO));
409 break;
410 case DESC_HDR_SEL0_RNG:
411 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
412 in_be32(priv->reg + TALITOS_RNGUISR),
413 in_be32(priv->reg + TALITOS_RNGUISR_LO));
414 break;
415 case DESC_HDR_SEL0_PKEU:
416 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
417 in_be32(priv->reg + TALITOS_PKEUISR),
418 in_be32(priv->reg + TALITOS_PKEUISR_LO));
419 break;
420 case DESC_HDR_SEL0_AESU:
421 dev_err(dev, "AESUISR 0x%08x_%08x\n",
422 in_be32(priv->reg + TALITOS_AESUISR),
423 in_be32(priv->reg + TALITOS_AESUISR_LO));
424 break;
425 case DESC_HDR_SEL0_CRCU:
426 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
427 in_be32(priv->reg + TALITOS_CRCUISR),
428 in_be32(priv->reg + TALITOS_CRCUISR_LO));
429 break;
430 case DESC_HDR_SEL0_KEU:
431 dev_err(dev, "KEUISR 0x%08x_%08x\n",
432 in_be32(priv->reg + TALITOS_KEUISR),
433 in_be32(priv->reg + TALITOS_KEUISR_LO));
434 break;
435 }
436
437 switch (desc->hdr & DESC_HDR_SEL1_MASK) {
438 case DESC_HDR_SEL1_MDEUA:
439 case DESC_HDR_SEL1_MDEUB:
440 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
441 in_be32(priv->reg + TALITOS_MDEUISR),
442 in_be32(priv->reg + TALITOS_MDEUISR_LO));
443 break;
444 case DESC_HDR_SEL1_CRCU:
445 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
446 in_be32(priv->reg + TALITOS_CRCUISR),
447 in_be32(priv->reg + TALITOS_CRCUISR_LO));
448 break;
449 }
450
451 for (i = 0; i < 8; i++)
452 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
453 in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
454 in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
455}
456
457/*
458 * recover from error interrupts
459 */
460static void talitos_error(unsigned long data)
461{
462 struct device *dev = (struct device *)data;
463 struct talitos_private *priv = dev_get_drvdata(dev);
464 unsigned int timeout = TALITOS_TIMEOUT;
465 int ch, error, reset_dev = 0, reset_ch = 0;
466 u32 isr, isr_lo, v, v_lo;
467
468 isr = in_be32(priv->reg + TALITOS_ISR);
469 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
470
471 for (ch = 0; ch < priv->num_channels; ch++) {
472 /* skip channels without errors */
473 if (!(isr & (1 << (ch * 2 + 1))))
474 continue;
475
476 error = -EINVAL;
477
478 v = in_be32(priv->reg + TALITOS_CCPSR(ch));
479 v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
480
481 if (v_lo & TALITOS_CCPSR_LO_DOF) {
482 dev_err(dev, "double fetch fifo overflow error\n");
483 error = -EAGAIN;
484 reset_ch = 1;
485 }
486 if (v_lo & TALITOS_CCPSR_LO_SOF) {
487 /* h/w dropped descriptor */
488 dev_err(dev, "single fetch fifo overflow error\n");
489 error = -EAGAIN;
490 }
491 if (v_lo & TALITOS_CCPSR_LO_MDTE)
492 dev_err(dev, "master data transfer error\n");
493 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
494 dev_err(dev, "s/g data length zero error\n");
495 if (v_lo & TALITOS_CCPSR_LO_FPZ)
496 dev_err(dev, "fetch pointer zero error\n");
497 if (v_lo & TALITOS_CCPSR_LO_IDH)
498 dev_err(dev, "illegal descriptor header error\n");
499 if (v_lo & TALITOS_CCPSR_LO_IEU)
500 dev_err(dev, "invalid execution unit error\n");
501 if (v_lo & TALITOS_CCPSR_LO_EU)
502 report_eu_error(dev, ch, current_desc(dev, ch));
503 if (v_lo & TALITOS_CCPSR_LO_GB)
504 dev_err(dev, "gather boundary error\n");
505 if (v_lo & TALITOS_CCPSR_LO_GRL)
506 dev_err(dev, "gather return/length error\n");
507 if (v_lo & TALITOS_CCPSR_LO_SB)
508 dev_err(dev, "scatter boundary error\n");
509 if (v_lo & TALITOS_CCPSR_LO_SRL)
510 dev_err(dev, "scatter return/length error\n");
511
512 flush_channel(dev, ch, error, reset_ch);
513
514 if (reset_ch) {
515 reset_channel(dev, ch);
516 } else {
517 setbits32(priv->reg + TALITOS_CCCR(ch),
518 TALITOS_CCCR_CONT);
519 setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
520 while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
521 TALITOS_CCCR_CONT) && --timeout)
522 cpu_relax();
523 if (timeout == 0) {
524 dev_err(dev, "failed to restart channel %d\n",
525 ch);
526 reset_dev = 1;
527 }
528 }
529 }
530 if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
531 dev_err(dev, "done overflow, internal time out, or rngu error: "
532 "ISR 0x%08x_%08x\n", isr, isr_lo);
533
534 /* purge request queues */
535 for (ch = 0; ch < priv->num_channels; ch++)
536 flush_channel(dev, ch, -EIO, 1);
537
538 /* reset and reinitialize the device */
539 init_device(dev);
540 }
541}
542
543static irqreturn_t talitos_interrupt(int irq, void *data)
544{
545 struct device *dev = data;
546 struct talitos_private *priv = dev_get_drvdata(dev);
547 u32 isr, isr_lo;
548
549 isr = in_be32(priv->reg + TALITOS_ISR);
550 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
551
552 /* ack */
553 out_be32(priv->reg + TALITOS_ICR, isr);
554 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
555
556 if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
557 talitos_error((unsigned long)data);
558 else
559 if (likely(isr & TALITOS_ISR_CHDONE))
560 tasklet_schedule(&priv->done_task);
561
562 return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
563}
564
565/*
566 * hwrng
567 */
568static int talitos_rng_data_present(struct hwrng *rng, int wait)
569{
570 struct device *dev = (struct device *)rng->priv;
571 struct talitos_private *priv = dev_get_drvdata(dev);
572 u32 ofl;
573 int i;
574
575 for (i = 0; i < 20; i++) {
576 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
577 TALITOS_RNGUSR_LO_OFL;
578 if (ofl || !wait)
579 break;
580 udelay(10);
581 }
582
583 return !!ofl;
584}
585
586static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
587{
588 struct device *dev = (struct device *)rng->priv;
589 struct talitos_private *priv = dev_get_drvdata(dev);
590
591 /* rng fifo requires 64-bit accesses: both words are read, only the low one is returned */
592 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
593 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
594
595 return sizeof(u32);
596}
597
598static int talitos_rng_init(struct hwrng *rng)
599{
600 struct device *dev = (struct device *)rng->priv;
601 struct talitos_private *priv = dev_get_drvdata(dev);
602 unsigned int timeout = TALITOS_TIMEOUT;
603
604 setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
605 while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
606 && --timeout)
607 cpu_relax();
608 if (timeout == 0) {
609 dev_err(dev, "failed to reset rng hw\n");
610 return -ENODEV;
611 }
612
613 /* start generating */
614 setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
615
616 return 0;
617}
618
619static int talitos_register_rng(struct device *dev)
620{
621 struct talitos_private *priv = dev_get_drvdata(dev);
622
623 priv->rng.name = dev_driver_string(dev);
624 priv->rng.init = talitos_rng_init;
625 priv->rng.data_present = talitos_rng_data_present;
626 priv->rng.data_read = talitos_rng_data_read;
627 priv->rng.priv = (unsigned long)dev;
628
629 return hwrng_register(&priv->rng);
630}
631
632static void talitos_unregister_rng(struct device *dev)
633{
634 struct talitos_private *priv = dev_get_drvdata(dev);
635
636 hwrng_unregister(&priv->rng);
637}
638
639/*
640 * crypto alg
641 */
642#define TALITOS_CRA_PRIORITY 3000
643#define TALITOS_MAX_KEY_SIZE 64
644#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
645
646#define MD5_DIGEST_SIZE 16
647
648struct talitos_ctx {
649 struct device *dev;
650 __be32 desc_hdr_template;
651 u8 key[TALITOS_MAX_KEY_SIZE];
652 u8 iv[TALITOS_MAX_IV_LENGTH];
653 unsigned int keylen;
654 unsigned int enckeylen;
655 unsigned int authkeylen;
656 unsigned int authsize;
657};
658
659static int aead_authenc_setauthsize(struct crypto_aead *authenc,
660 unsigned int authsize)
661{
662 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
663
664 ctx->authsize = authsize;
665
666 return 0;
667}
668
669static int aead_authenc_setkey(struct crypto_aead *authenc,
670 const u8 *key, unsigned int keylen)
671{
672 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
673 struct rtattr *rta = (void *)key;
674 struct crypto_authenc_key_param *param;
675 unsigned int authkeylen;
676 unsigned int enckeylen;
677
678 if (!RTA_OK(rta, keylen))
679 goto badkey;
680
681 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
682 goto badkey;
683
684 if (RTA_PAYLOAD(rta) < sizeof(*param))
685 goto badkey;
686
687 param = RTA_DATA(rta);
688 enckeylen = be32_to_cpu(param->enckeylen);
689
690 key += RTA_ALIGN(rta->rta_len);
691 keylen -= RTA_ALIGN(rta->rta_len);
692
693 if (keylen < enckeylen)
694 goto badkey;
695
696 authkeylen = keylen - enckeylen;
697
698 if (keylen > TALITOS_MAX_KEY_SIZE)
699 goto badkey;
700
701 memcpy(&ctx->key, key, keylen);
702
703 ctx->keylen = keylen;
704 ctx->enckeylen = enckeylen;
705 ctx->authkeylen = authkeylen;
706
707 return 0;
708
709badkey:
710 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
711 return -EINVAL;
712}
713
714/*
715 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
716 * @src_nents: number of segments in input scatterlist
717 * @dst_nents: number of segments in output scatterlist
718 * @dma_len: length of dma mapped link_tbl space
719 * @dma_link_tbl: bus physical address of link_tbl
720 * @desc: h/w descriptor
721 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
722 *
723 * if decrypting (with authcheck), or if either src_nents or dst_nents
724 * is greater than 1, an integrity check value is concatenated to the end
725 * of the link_tbl data
726 */
727struct ipsec_esp_edesc {
728 int src_nents;
729 int dst_nents;
730 int dma_len;
731 dma_addr_t dma_link_tbl;
732 struct talitos_desc desc;
733 struct talitos_ptr link_tbl[0];
734};
735
736static void ipsec_esp_unmap(struct device *dev,
737 struct ipsec_esp_edesc *edesc,
738 struct aead_request *areq)
739{
740 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
741 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
742 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
743 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
744
745 dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
746
747 if (areq->src != areq->dst) {
748 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
749 DMA_TO_DEVICE);
750 dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
751 DMA_FROM_DEVICE);
752 } else {
753 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
754 DMA_BIDIRECTIONAL);
755 }
756
757 if (edesc->dma_len)
758 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
759 DMA_BIDIRECTIONAL);
760}
761
762/*
763 * ipsec_esp descriptor callbacks
764 */
765static void ipsec_esp_encrypt_done(struct device *dev,
766 struct talitos_desc *desc, void *context,
767 int err)
768{
769 struct aead_request *areq = context;
770 struct ipsec_esp_edesc *edesc =
771 container_of(desc, struct ipsec_esp_edesc, desc);
772 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
773 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
774 struct scatterlist *sg;
775 void *icvdata;
776
777 ipsec_esp_unmap(dev, edesc, areq);
778
779 /* copy the generated ICV to dst */
780 if (edesc->dma_len) {
781 icvdata = &edesc->link_tbl[edesc->src_nents +
782 edesc->dst_nents + 1];
783 sg = sg_last(areq->dst, edesc->dst_nents);
784 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
785 icvdata, ctx->authsize);
786 }
787
788 kfree(edesc);
789
790 aead_request_complete(areq, err);
791}
792
793static void ipsec_esp_decrypt_done(struct device *dev,
794 struct talitos_desc *desc, void *context,
795 int err)
796{
797 struct aead_request *req = context;
798 struct ipsec_esp_edesc *edesc =
799 container_of(desc, struct ipsec_esp_edesc, desc);
800 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
801 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
802 struct scatterlist *sg;
803 void *icvdata;
804
805 ipsec_esp_unmap(dev, edesc, req);
806
807 if (!err) {
808 /* auth check */
809 if (edesc->dma_len)
810 icvdata = &edesc->link_tbl[edesc->src_nents +
811 edesc->dst_nents + 1];
812 else
813 icvdata = &edesc->link_tbl[0];
814
815 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
816 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
817 ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
818 }
819
820 kfree(edesc);
821
822 aead_request_complete(req, err);
823}
824
825/*
826 * convert scatterlist to SEC h/w link table format
827 * stop at cryptlen bytes
828 */
829static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
830 int cryptlen, struct talitos_ptr *link_tbl_ptr)
831{
832 int n_sg = sg_count;
833
834 while (n_sg--) {
835 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
836 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
837 link_tbl_ptr->j_extent = 0;
838 link_tbl_ptr++;
839 cryptlen -= sg_dma_len(sg);
840 sg = sg_next(sg);
841 }
842
843 /* adjust (decrease) last one (or two) entry's len to cryptlen */
844 link_tbl_ptr--;
845 while (link_tbl_ptr->len <= (-cryptlen)) {
846 /* Empty this entry, and move to previous one */
847 cryptlen += be16_to_cpu(link_tbl_ptr->len);
848 link_tbl_ptr->len = 0;
849 sg_count--;
850 link_tbl_ptr--;
851 }
852 link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
853 + cryptlen);
854
855 /* tag end of link table */
856 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
857
858 return sg_count;
859}
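A worked example of the trimming loop: for two 64-byte segments and cryptlen == 100, the loop leaves entry 0 at 64 bytes and shortens the last entry to 100 - 64 = 36; an entry lying entirely beyond cryptlen is instead zeroed out and the returned sg_count decremented, so the RETURN tag always lands on the last entry that carries data.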
860
861/*
862 * fill in and submit ipsec_esp descriptor
863 */
864static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
865 u8 *giv, u64 seq,
866 void (*callback) (struct device *dev,
867 struct talitos_desc *desc,
868 void *context, int error))
869{
870 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
871 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
872 struct device *dev = ctx->dev;
873 struct talitos_desc *desc = &edesc->desc;
874 unsigned int cryptlen = areq->cryptlen;
875 unsigned int authsize = ctx->authsize;
876 unsigned int ivsize;
877 int sg_count;
878
879 /* hmac key */
880 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
881 0, DMA_TO_DEVICE);
882 /* hmac data */
883 map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
884 sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
885 DMA_TO_DEVICE);
886 /* cipher iv */
887 ivsize = crypto_aead_ivsize(aead);
888 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
889 DMA_TO_DEVICE);
890
891 /* cipher key */
892 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
893 (char *)&ctx->key + ctx->authkeylen, 0,
894 DMA_TO_DEVICE);
895
896 /*
897 * cipher in
898 * map and adjust cipher len to aead request cryptlen.
899 * extent is bytes of HMAC postpended to ciphertext,
900 * typically 12 for ipsec
901 */
902 desc->ptr[4].len = cpu_to_be16(cryptlen);
903 desc->ptr[4].j_extent = authsize;
904
905 if (areq->src == areq->dst)
906 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
907 DMA_BIDIRECTIONAL);
908 else
909 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
910 DMA_TO_DEVICE);
911
912 if (sg_count == 1) {
913 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
914 } else {
915 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
916 &edesc->link_tbl[0]);
917 if (sg_count > 1) {
918 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
919 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
920 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
921 edesc->dma_len, DMA_BIDIRECTIONAL);
922 } else {
923 /* Only one segment now, so no link tbl needed */
924 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
925 }
926 }
927
928 /* cipher out */
929 desc->ptr[5].len = cpu_to_be16(cryptlen);
930 desc->ptr[5].j_extent = authsize;
931
932 if (areq->src != areq->dst) {
933 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
934 DMA_FROM_DEVICE);
935 }
936
937 if (sg_count == 1) {
938 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
939 } else {
940 struct talitos_ptr *link_tbl_ptr =
941 &edesc->link_tbl[edesc->src_nents];
942 struct scatterlist *sg;
943
944 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
945 edesc->dma_link_tbl +
946 edesc->src_nents);
947 if (areq->src == areq->dst) {
948 memcpy(link_tbl_ptr, &edesc->link_tbl[0],
949 edesc->src_nents * sizeof(struct talitos_ptr));
950 } else {
951 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
952 link_tbl_ptr);
953 }
954 link_tbl_ptr += sg_count - 1;
955
956 /* handle case where sg_last contains the ICV exclusively */
957 sg = sg_last(areq->dst, edesc->dst_nents);
958 if (sg->length == ctx->authsize)
959 link_tbl_ptr--;
960
961 link_tbl_ptr->j_extent = 0;
962 link_tbl_ptr++;
963 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
964 link_tbl_ptr->len = cpu_to_be16(authsize);
965
966 /* icv data follows link tables */
967 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
968 edesc->dma_link_tbl +
969 edesc->src_nents +
970 edesc->dst_nents + 1);
971
972 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
973 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
974 edesc->dma_len, DMA_BIDIRECTIONAL);
975 }
976
977 /* iv out */
978 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
979 DMA_FROM_DEVICE);
980
981 return talitos_submit(dev, desc, callback, areq);
982}
983
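For reference, the pointer map that ipsec_esp() builds for the single-pass IPSEC_ESP descriptor type (collected from the assignments above):

	/* ptr[0] hmac key           ptr[4] cipher in  (+ICV extent)
	 * ptr[1] hmac (assoc) data  ptr[5] cipher out (+ICV extent)
	 * ptr[2] cipher iv          ptr[6] iv out
	 * ptr[3] cipher key                                         */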
984
985/*
986 * derive number of elements in scatterlist
987 */
988static int sg_count(struct scatterlist *sg_list, int nbytes)
989{
990 struct scatterlist *sg = sg_list;
991 int sg_nents = 0;
992
993 while (nbytes) {
994 sg_nents++;
995 nbytes -= sg->length;
996 sg = sg_next(sg);
997 }
998
999 return sg_nents;
1000}
1001
1002/*
1003 * allocate and map the ipsec_esp extended descriptor
1004 */
1005static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1006 int icv_stashing)
1007{
1008 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1009 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1010 struct ipsec_esp_edesc *edesc;
1011 int src_nents, dst_nents, alloc_len, dma_len;
1012
1013 if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
1014 dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
1015 return ERR_PTR(-EINVAL);
1016 }
1017
1018 src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
1019 src_nents = (src_nents == 1) ? 0 : src_nents;
1020
1021 if (areq->dst == areq->src) {
1022 dst_nents = src_nents;
1023 } else {
1024 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
1025 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1026 }
1027
1028 /*
1029 * allocate space for base edesc plus the link tables,
1030 * allowing for a separate entry for the generated ICV (+ 1),
1031 * and the ICV data itself
1032 */
1033 alloc_len = sizeof(struct ipsec_esp_edesc);
1034 if (src_nents || dst_nents) {
1035 dma_len = (src_nents + dst_nents + 1) *
1036 sizeof(struct talitos_ptr) + ctx->authsize;
1037 alloc_len += dma_len;
1038 } else {
1039 dma_len = 0;
1040 alloc_len += icv_stashing ? ctx->authsize : 0;
1041 }
1042
1043 edesc = kmalloc(alloc_len, GFP_DMA);
1044 if (!edesc) {
1045 dev_err(ctx->dev, "could not allocate edescriptor\n");
1046 return ERR_PTR(-ENOMEM);
1047 }
1048
1049 edesc->src_nents = src_nents;
1050 edesc->dst_nents = dst_nents;
1051 edesc->dma_len = dma_len;
1052 edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
1053 edesc->dma_len, DMA_BIDIRECTIONAL);
1054
1055 return edesc;
1056}
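When link tables are needed (src_nents or dst_nents nonzero), the buffer allocated above is laid out as:

	/* [ struct ipsec_esp_edesc                        ]
	 * [ src link tbl: src_nents entries               ]
	 * [ dst link tbl: dst_nents entries + 1 ICV entry ]
	 * [ ICV data: ctx->authsize bytes                 ] */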
1057
1058static int aead_authenc_encrypt(struct aead_request *req)
1059{
1060 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1061 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1062 struct ipsec_esp_edesc *edesc;
1063
1064 /* allocate extended descriptor */
1065 edesc = ipsec_esp_edesc_alloc(req, 0);
1066 if (IS_ERR(edesc))
1067 return PTR_ERR(edesc);
1068
1069 /* set encrypt */
1070 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1071
1072 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1073}
1074
1075static int aead_authenc_decrypt(struct aead_request *req)
1076{
1077 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1078 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1079 unsigned int authsize = ctx->authsize;
1080 struct ipsec_esp_edesc *edesc;
1081 struct scatterlist *sg;
1082 void *icvdata;
1083
1084 req->cryptlen -= authsize;
1085
1086 /* allocate extended descriptor */
1087 edesc = ipsec_esp_edesc_alloc(req, 1);
1088 if (IS_ERR(edesc))
1089 return PTR_ERR(edesc);
1090
1091 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1092 if (edesc->dma_len)
1093 icvdata = &edesc->link_tbl[edesc->src_nents +
1094 edesc->dst_nents + 1];
1095 else
1096 icvdata = &edesc->link_tbl[0];
1097
1098 sg = sg_last(req->src, edesc->src_nents ? : 1);
1099
1100 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1101 ctx->authsize);
1102
1103 /* decrypt */
1104 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1105
1106 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done);
1107}
1108
1109static int aead_authenc_givencrypt(
1110 struct aead_givcrypt_request *req)
1111{
1112 struct aead_request *areq = &req->areq;
1113 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1114 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1115 struct ipsec_esp_edesc *edesc;
1116
1117 /* allocate extended descriptor */
1118 edesc = ipsec_esp_edesc_alloc(areq, 0);
1119 if (IS_ERR(edesc))
1120 return PTR_ERR(edesc);
1121
1122 /* set encrypt */
1123 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1124
1125 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1126
1127 return ipsec_esp(edesc, areq, req->giv, req->seq,
1128 ipsec_esp_encrypt_done);
1129}
1130
1131struct talitos_alg_template {
1132 char name[CRYPTO_MAX_ALG_NAME];
1133 char driver_name[CRYPTO_MAX_ALG_NAME];
1134 unsigned int blocksize;
1135 struct aead_alg aead;
1136 struct device *dev;
1137 __be32 desc_hdr_template;
1138};
1139
1140static struct talitos_alg_template driver_algs[] = {
1141 /* single-pass ipsec_esp descriptor */
1142 {
1143 .name = "authenc(hmac(sha1),cbc(aes))",
1144 .driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1145 .blocksize = AES_BLOCK_SIZE,
1146 .aead = {
1147 .setkey = aead_authenc_setkey,
1148 .setauthsize = aead_authenc_setauthsize,
1149 .encrypt = aead_authenc_encrypt,
1150 .decrypt = aead_authenc_decrypt,
1151 .givencrypt = aead_authenc_givencrypt,
1152 .geniv = "<built-in>",
1153 .ivsize = AES_BLOCK_SIZE,
1154 .maxauthsize = SHA1_DIGEST_SIZE,
1155 },
1156 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1157 DESC_HDR_SEL0_AESU |
1158 DESC_HDR_MODE0_AESU_CBC |
1159 DESC_HDR_SEL1_MDEUA |
1160 DESC_HDR_MODE1_MDEU_INIT |
1161 DESC_HDR_MODE1_MDEU_PAD |
1162 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1163 },
1164 {
1165 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1166 .driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1167 .blocksize = DES3_EDE_BLOCK_SIZE,
1168 .aead = {
1169 .setkey = aead_authenc_setkey,
1170 .setauthsize = aead_authenc_setauthsize,
1171 .encrypt = aead_authenc_encrypt,
1172 .decrypt = aead_authenc_decrypt,
1173 .givencrypt = aead_authenc_givencrypt,
1174 .geniv = "<built-in>",
1175 .ivsize = DES3_EDE_BLOCK_SIZE,
1176 .maxauthsize = SHA1_DIGEST_SIZE,
1177 },
1178 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1179 DESC_HDR_SEL0_DEU |
1180 DESC_HDR_MODE0_DEU_CBC |
1181 DESC_HDR_MODE0_DEU_3DES |
1182 DESC_HDR_SEL1_MDEUA |
1183 DESC_HDR_MODE1_MDEU_INIT |
1184 DESC_HDR_MODE1_MDEU_PAD |
1185 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1186 },
1187 {
1188 .name = "authenc(hmac(sha256),cbc(aes))",
1189 .driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1190 .blocksize = AES_BLOCK_SIZE,
1191 .aead = {
1192 .setkey = aead_authenc_setkey,
1193 .setauthsize = aead_authenc_setauthsize,
1194 .encrypt = aead_authenc_encrypt,
1195 .decrypt = aead_authenc_decrypt,
1196 .givencrypt = aead_authenc_givencrypt,
1197 .geniv = "<built-in>",
1198 .ivsize = AES_BLOCK_SIZE,
1199 .maxauthsize = SHA256_DIGEST_SIZE,
1200 },
1201 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1202 DESC_HDR_SEL0_AESU |
1203 DESC_HDR_MODE0_AESU_CBC |
1204 DESC_HDR_SEL1_MDEUA |
1205 DESC_HDR_MODE1_MDEU_INIT |
1206 DESC_HDR_MODE1_MDEU_PAD |
1207 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1208 },
1209 {
1210 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1211 .driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1212 .blocksize = DES3_EDE_BLOCK_SIZE,
1213 .aead = {
1214 .setkey = aead_authenc_setkey,
1215 .setauthsize = aead_authenc_setauthsize,
1216 .encrypt = aead_authenc_encrypt,
1217 .decrypt = aead_authenc_decrypt,
1218 .givencrypt = aead_authenc_givencrypt,
1219 .geniv = "<built-in>",
1220 .ivsize = DES3_EDE_BLOCK_SIZE,
1221 .maxauthsize = SHA256_DIGEST_SIZE,
1222 },
1223 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1224 DESC_HDR_SEL0_DEU |
1225 DESC_HDR_MODE0_DEU_CBC |
1226 DESC_HDR_MODE0_DEU_3DES |
1227 DESC_HDR_SEL1_MDEUA |
1228 DESC_HDR_MODE1_MDEU_INIT |
1229 DESC_HDR_MODE1_MDEU_PAD |
1230 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1231 },
1232 {
1233 .name = "authenc(hmac(md5),cbc(aes))",
1234 .driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1235 .blocksize = AES_BLOCK_SIZE,
1236 .aead = {
1237 .setkey = aead_authenc_setkey,
1238 .setauthsize = aead_authenc_setauthsize,
1239 .encrypt = aead_authenc_encrypt,
1240 .decrypt = aead_authenc_decrypt,
1241 .givencrypt = aead_authenc_givencrypt,
1242 .geniv = "<built-in>",
1243 .ivsize = AES_BLOCK_SIZE,
1244 .maxauthsize = MD5_DIGEST_SIZE,
1245 },
1246 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1247 DESC_HDR_SEL0_AESU |
1248 DESC_HDR_MODE0_AESU_CBC |
1249 DESC_HDR_SEL1_MDEUA |
1250 DESC_HDR_MODE1_MDEU_INIT |
1251 DESC_HDR_MODE1_MDEU_PAD |
1252 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1253 },
1254 {
1255 .name = "authenc(hmac(md5),cbc(des3_ede))",
1256 .driver_name = "authenc-hmac-md5-cbc-3des-talitos",
1257 .blocksize = DES3_EDE_BLOCK_SIZE,
1258 .aead = {
1259 .setkey = aead_authenc_setkey,
1260 .setauthsize = aead_authenc_setauthsize,
1261 .encrypt = aead_authenc_encrypt,
1262 .decrypt = aead_authenc_decrypt,
1263 .givencrypt = aead_authenc_givencrypt,
1264 .geniv = "<built-in>",
1265 .ivsize = DES3_EDE_BLOCK_SIZE,
1266 .maxauthsize = MD5_DIGEST_SIZE,
1267 },
1268 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1269 DESC_HDR_SEL0_DEU |
1270 DESC_HDR_MODE0_DEU_CBC |
1271 DESC_HDR_MODE0_DEU_3DES |
1272 DESC_HDR_SEL1_MDEUA |
1273 DESC_HDR_MODE1_MDEU_INIT |
1274 DESC_HDR_MODE1_MDEU_PAD |
1275 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1276 }
1277};
1278
1279struct talitos_crypto_alg {
1280 struct list_head entry;
1281 struct device *dev;
1282 __be32 desc_hdr_template;
1283 struct crypto_alg crypto_alg;
1284};
1285
1286static int talitos_cra_init(struct crypto_tfm *tfm)
1287{
1288 struct crypto_alg *alg = tfm->__crt_alg;
1289 struct talitos_crypto_alg *talitos_alg =
1290 container_of(alg, struct talitos_crypto_alg, crypto_alg);
1291 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
1292
1293 /* update context with ptr to dev */
1294 ctx->dev = talitos_alg->dev;
1295 /* copy descriptor header template value */
1296 ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
1297
1298 /* random first IV */
1299 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
1300
1301 return 0;
1302}
1303
1304/*
1305 * given the alg's descriptor header template, determine whether descriptor
1306 * type and primary/secondary execution units required match the hw
1307 * capabilities description provided in the device tree node.
1308 */
1309static int hw_supports(struct device *dev, __be32 desc_hdr_template)
1310{
1311 struct talitos_private *priv = dev_get_drvdata(dev);
1312 int ret;
1313
1314 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
1315 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
1316
1317 if (SECONDARY_EU(desc_hdr_template))
1318 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
1319 & priv->exec_units);
1320
1321 return ret;
1322}
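A hedged example (the property value here is made up): with fsl,exec-units-mask = <0xfe> in the device tree, a template whose PRIMARY_EU field decodes to 2 passes the test (1 << 2 == 0x4, which is set in 0xfe), while a template naming an execution unit outside the mask fails it, and talitos_probe() below simply skips registering that algorithm.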
1323
1324static int __devexit talitos_remove(struct of_device *ofdev)
1325{
1326 struct device *dev = &ofdev->dev;
1327 struct talitos_private *priv = dev_get_drvdata(dev);
1328 struct talitos_crypto_alg *t_alg, *n;
1329 int i;
1330
1331 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1332 crypto_unregister_alg(&t_alg->crypto_alg);
1333 list_del(&t_alg->entry);
1334 kfree(t_alg);
1335 }
1336
1337 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1338 talitos_unregister_rng(dev);
1339
1340 kfree(priv->tail);
1341 kfree(priv->head);
1342
1343 if (priv->fifo)
1344 for (i = 0; i < priv->num_channels; i++)
1345 kfree(priv->fifo[i]);
1346
1347 kfree(priv->fifo);
1348 kfree(priv->head_lock);
1349 kfree(priv->tail_lock);
1350
1351 if (priv->irq != NO_IRQ) {
1352 free_irq(priv->irq, dev);
1353 irq_dispose_mapping(priv->irq);
1354 }
1355
1356 tasklet_kill(&priv->done_task);
1357 tasklet_kill(&priv->error_task);
1358
1359 iounmap(priv->reg);
1360
1361 dev_set_drvdata(dev, NULL);
1362
1363 kfree(priv);
1364
1365 return 0;
1366}
1367
1368static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1369 struct talitos_alg_template
1370 *template)
1371{
1372 struct talitos_crypto_alg *t_alg;
1373 struct crypto_alg *alg;
1374
1375 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
1376 if (!t_alg)
1377 return ERR_PTR(-ENOMEM);
1378
1379 alg = &t_alg->crypto_alg;
1380
1381 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1382 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1383 template->driver_name);
1384 alg->cra_module = THIS_MODULE;
1385 alg->cra_init = talitos_cra_init;
1386 alg->cra_priority = TALITOS_CRA_PRIORITY;
1387 alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
1388 alg->cra_blocksize = template->blocksize;
1389 alg->cra_alignmask = 0;
1390 alg->cra_type = &crypto_aead_type;
1391 alg->cra_ctxsize = sizeof(struct talitos_ctx);
1392 alg->cra_u.aead = template->aead;
1393
1394 t_alg->desc_hdr_template = template->desc_hdr_template;
1395 t_alg->dev = dev;
1396
1397 return t_alg;
1398}
1399
1400static int talitos_probe(struct of_device *ofdev,
1401 const struct of_device_id *match)
1402{
1403 struct device *dev = &ofdev->dev;
1404 struct device_node *np = ofdev->node;
1405 struct talitos_private *priv;
1406 const unsigned int *prop;
1407 int i, err;
1408
1409 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
1410 if (!priv)
1411 return -ENOMEM;
1412
1413 dev_set_drvdata(dev, priv);
1414
1415 priv->ofdev = ofdev;
1416
1417 tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
1418 tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);
1419
1420 priv->irq = irq_of_parse_and_map(np, 0);
1421
1422 if (priv->irq == NO_IRQ) {
1423 dev_err(dev, "failed to map irq\n");
1424 err = -EINVAL;
1425 goto err_out;
1426 }
1427
1428 /* get the irq line */
1429 err = request_irq(priv->irq, talitos_interrupt, 0,
1430 dev_driver_string(dev), dev);
1431 if (err) {
1432 dev_err(dev, "failed to request irq %d\n", priv->irq);
1433 irq_dispose_mapping(priv->irq);
1434 priv->irq = NO_IRQ;
1435 goto err_out;
1436 }
1437
1438 priv->reg = of_iomap(np, 0);
1439 if (!priv->reg) {
1440 dev_err(dev, "failed to of_iomap\n");
1441 err = -ENOMEM;
1442 goto err_out;
1443 }
1444
1445 /* get SEC version capabilities from device tree */
1446 prop = of_get_property(np, "fsl,num-channels", NULL);
1447 if (prop)
1448 priv->num_channels = *prop;
1449
1450 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
1451 if (prop)
1452 priv->chfifo_len = *prop;
1453
1454 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
1455 if (prop)
1456 priv->exec_units = *prop;
1457
1458 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
1459 if (prop)
1460 priv->desc_types = *prop;
1461
1462 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
1463 !priv->exec_units || !priv->desc_types) {
1464 dev_err(dev, "invalid property data in device tree node\n");
1465 err = -EINVAL;
1466 goto err_out;
1467 }
1468
1469 of_node_put(np);
1470 np = NULL;
1471
1472 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1473 GFP_KERNEL);
1474 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1475 GFP_KERNEL);
1476 if (!priv->head_lock || !priv->tail_lock) {
1477 dev_err(dev, "failed to allocate fifo locks\n");
1478 err = -ENOMEM;
1479 goto err_out;
1480 }
1481
1482 for (i = 0; i < priv->num_channels; i++) {
1483 spin_lock_init(&priv->head_lock[i]);
1484 spin_lock_init(&priv->tail_lock[i]);
1485 }
1486
1487 priv->fifo = kmalloc(sizeof(struct talitos_request *) *
1488 priv->num_channels, GFP_KERNEL);
1489 if (!priv->fifo) {
1490 dev_err(dev, "failed to allocate request fifo\n");
1491 err = -ENOMEM;
1492 goto err_out;
1493 }
1494
1495 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
1496
1497 for (i = 0; i < priv->num_channels; i++) {
1498 priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
1499 priv->fifo_len, GFP_KERNEL);
1500 if (!priv->fifo[i]) {
1501 dev_err(dev, "failed to allocate request fifo %d\n", i);
1502 err = -ENOMEM;
1503 goto err_out;
1504 }
1505 }
1506
1507 priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1508 priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1509 if (!priv->head || !priv->tail) {
1510 dev_err(dev, "failed to allocate request index space\n");
1511 err = -ENOMEM;
1512 goto err_out;
1513 }
1514
1515 /* reset and initialize the h/w */
1516 err = init_device(dev);
1517 if (err) {
1518 dev_err(dev, "failed to initialize device\n");
1519 goto err_out;
1520 }
1521
1522 /* register the RNG, if available */
1523 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
1524 err = talitos_register_rng(dev);
1525 if (err) {
1526 dev_err(dev, "failed to register hwrng: %d\n", err);
1527 goto err_out;
1528 } else
1529 dev_info(dev, "hwrng\n");
1530 }
1531
1532 /* register crypto algorithms the device supports */
1533 INIT_LIST_HEAD(&priv->alg_list);
1534
1535 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1536 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
1537 struct talitos_crypto_alg *t_alg;
1538
1539 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
1540 if (IS_ERR(t_alg)) {
1541 err = PTR_ERR(t_alg);
1542 goto err_out;
1543 }
1544
1545 err = crypto_register_alg(&t_alg->crypto_alg);
1546 if (err) {
1547 dev_err(dev, "%s alg registration failed\n",
1548 t_alg->crypto_alg.cra_driver_name);
1549 kfree(t_alg);
1550 } else {
1551 list_add_tail(&t_alg->entry, &priv->alg_list);
1552 dev_info(dev, "%s\n",
1553 t_alg->crypto_alg.cra_driver_name);
1554 }
1555 }
1556 }
1557
1558 return 0;
1559
1560err_out:
1561 talitos_remove(ofdev);
1562 if (np)
1563 of_node_put(np);
1564
1565 return err;
1566}
1567
1568static struct of_device_id talitos_match[] = {
1569 {
1570 .compatible = "fsl,sec2.0",
1571 },
1572 {},
1573};
1574MODULE_DEVICE_TABLE(of, talitos_match);
1575
1576static struct of_platform_driver talitos_driver = {
1577 .name = "talitos",
1578 .match_table = talitos_match,
1579 .probe = talitos_probe,
1580 .remove = __devexit_p(talitos_remove),
1581};
1582
1583static int __init talitos_init(void)
1584{
1585 return of_register_platform_driver(&talitos_driver);
1586}
1587module_init(talitos_init);
1588
1589static void __exit talitos_exit(void)
1590{
1591 of_unregister_platform_driver(&talitos_driver);
1592}
1593module_exit(talitos_exit);
1594
1595MODULE_LICENSE("GPL");
1596MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
1597MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
new file mode 100644
index 000000000000..c48a405abf70
--- /dev/null
+++ b/drivers/crypto/talitos.h
@@ -0,0 +1,199 @@
+/*
+ * Freescale SEC (talitos) device register and descriptor header defines
+ *
+ * Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register
+ */
+
+/* global register offset addresses */
+#define TALITOS_MCR		0x1030	/* master control register */
+#define TALITOS_MCR_LO		0x1038
+#define TALITOS_MCR_SWR		0x1	/* s/w reset */
+#define TALITOS_IMR		0x1008	/* interrupt mask register */
+#define TALITOS_IMR_INIT	0x10fff	/* enable channel IRQs */
+#define TALITOS_IMR_LO		0x100C
+#define TALITOS_IMR_LO_INIT	0x20000	/* allow RNGU error IRQs */
+#define TALITOS_ISR		0x1010	/* interrupt status register */
+#define TALITOS_ISR_CHERR	0xaa	/* channel errors mask */
+#define TALITOS_ISR_CHDONE	0x55	/* channel done mask */
+#define TALITOS_ISR_LO		0x1014
+#define TALITOS_ICR		0x1018	/* interrupt clear register */
+#define TALITOS_ICR_LO		0x101C
+
+/* channel register address stride */
+#define TALITOS_CH_STRIDE	0x100
+
+/* channel configuration register */
+#define TALITOS_CCCR(ch)	(ch * TALITOS_CH_STRIDE + 0x1108)
+#define TALITOS_CCCR_CONT	0x2	/* channel continue */
+#define TALITOS_CCCR_RESET	0x1	/* channel reset */
+#define TALITOS_CCCR_LO(ch)	(ch * TALITOS_CH_STRIDE + 0x110c)
+#define TALITOS_CCCR_LO_CDWE	0x10	/* chan. done writeback enab. */
+#define TALITOS_CCCR_LO_NT	0x4	/* notification type */
+#define TALITOS_CCCR_LO_CDIE	0x2	/* channel done IRQ enable */
+
+/* CCPSR: channel pointer status register */
+#define TALITOS_CCPSR(ch)	(ch * TALITOS_CH_STRIDE + 0x1110)
+#define TALITOS_CCPSR_LO(ch)	(ch * TALITOS_CH_STRIDE + 0x1114)
+#define TALITOS_CCPSR_LO_DOF	0x8000	/* double FF write oflow error */
+#define TALITOS_CCPSR_LO_SOF	0x4000	/* single FF write oflow error */
+#define TALITOS_CCPSR_LO_MDTE	0x2000	/* master data transfer error */
+#define TALITOS_CCPSR_LO_SGDLZ	0x1000	/* s/g data len zero error */
+#define TALITOS_CCPSR_LO_FPZ	0x0800	/* fetch ptr zero error */
+#define TALITOS_CCPSR_LO_IDH	0x0400	/* illegal desc hdr error */
+#define TALITOS_CCPSR_LO_IEU	0x0200	/* invalid EU error */
+#define TALITOS_CCPSR_LO_EU	0x0100	/* EU error detected */
+#define TALITOS_CCPSR_LO_GB	0x0080	/* gather boundary error */
+#define TALITOS_CCPSR_LO_GRL	0x0040	/* gather return/length error */
+#define TALITOS_CCPSR_LO_SB	0x0020	/* scatter boundary error */
+#define TALITOS_CCPSR_LO_SRL	0x0010	/* scatter return/length error */
+
+/* channel fetch fifo register */
+#define TALITOS_FF(ch)		(ch * TALITOS_CH_STRIDE + 0x1148)
+#define TALITOS_FF_LO(ch)	(ch * TALITOS_CH_STRIDE + 0x114c)
+
+/* current descriptor pointer register */
+#define TALITOS_CDPR(ch)	(ch * TALITOS_CH_STRIDE + 0x1140)
+#define TALITOS_CDPR_LO(ch)	(ch * TALITOS_CH_STRIDE + 0x1144)
+
+/* descriptor buffer register */
+#define TALITOS_DESCBUF(ch)	(ch * TALITOS_CH_STRIDE + 0x1180)
+#define TALITOS_DESCBUF_LO(ch)	(ch * TALITOS_CH_STRIDE + 0x1184)
+
+/* gather link table */
+#define TALITOS_GATHER(ch)	(ch * TALITOS_CH_STRIDE + 0x11c0)
+#define TALITOS_GATHER_LO(ch)	(ch * TALITOS_CH_STRIDE + 0x11c4)
+
+/* scatter link table */
+#define TALITOS_SCATTER(ch)	(ch * TALITOS_CH_STRIDE + 0x11e0)
+#define TALITOS_SCATTER_LO(ch)	(ch * TALITOS_CH_STRIDE + 0x11e4)
+
+/* execution unit interrupt status registers */
+#define TALITOS_DEUISR		0x2030	/* DES unit */
+#define TALITOS_DEUISR_LO	0x2034
+#define TALITOS_AESUISR		0x4030	/* AES unit */
+#define TALITOS_AESUISR_LO	0x4034
+#define TALITOS_MDEUISR		0x6030	/* message digest unit */
+#define TALITOS_MDEUISR_LO	0x6034
+#define TALITOS_AFEUISR		0x8030	/* arc4 unit */
+#define TALITOS_AFEUISR_LO	0x8034
+#define TALITOS_RNGUISR		0xa030	/* random number unit */
+#define TALITOS_RNGUISR_LO	0xa034
+#define TALITOS_RNGUSR		0xa028	/* rng status */
+#define TALITOS_RNGUSR_LO	0xa02c
+#define TALITOS_RNGUSR_LO_RD	0x1	/* reset done */
+#define TALITOS_RNGUSR_LO_OFL	0xff0000 /* output FIFO length */
+#define TALITOS_RNGUDSR		0xa010	/* data size */
+#define TALITOS_RNGUDSR_LO	0xa014
+#define TALITOS_RNGU_FIFO	0xa800	/* output FIFO */
+#define TALITOS_RNGU_FIFO_LO	0xa804	/* output FIFO */
+#define TALITOS_RNGURCR		0xa018	/* reset control */
+#define TALITOS_RNGURCR_LO	0xa01c
+#define TALITOS_RNGURCR_LO_SR	0x1	/* software reset */
+#define TALITOS_PKEUISR		0xc030	/* public key unit */
+#define TALITOS_PKEUISR_LO	0xc034
+#define TALITOS_KEUISR		0xe030	/* kasumi unit */
+#define TALITOS_KEUISR_LO	0xe034
+#define TALITOS_CRCUISR		0xf030	/* cyclic redundancy check unit */
+#define TALITOS_CRCUISR_LO	0xf034
+
+/*
+ * talitos descriptor header (hdr) bits
+ */
+
+/* written back when done */
+#define DESC_HDR_DONE			__constant_cpu_to_be32(0xff000000)
+
+/* primary execution unit select */
+#define DESC_HDR_SEL0_MASK		__constant_cpu_to_be32(0xf0000000)
+#define DESC_HDR_SEL0_AFEU		__constant_cpu_to_be32(0x10000000)
+#define DESC_HDR_SEL0_DEU		__constant_cpu_to_be32(0x20000000)
+#define DESC_HDR_SEL0_MDEUA		__constant_cpu_to_be32(0x30000000)
+#define DESC_HDR_SEL0_MDEUB		__constant_cpu_to_be32(0xb0000000)
+#define DESC_HDR_SEL0_RNG		__constant_cpu_to_be32(0x40000000)
+#define DESC_HDR_SEL0_PKEU		__constant_cpu_to_be32(0x50000000)
+#define DESC_HDR_SEL0_AESU		__constant_cpu_to_be32(0x60000000)
+#define DESC_HDR_SEL0_KEU		__constant_cpu_to_be32(0x70000000)
+#define DESC_HDR_SEL0_CRCU		__constant_cpu_to_be32(0x80000000)
+
+/* primary execution unit mode (MODE0) and derivatives */
+#define DESC_HDR_MODE0_ENCRYPT		__constant_cpu_to_be32(0x00100000)
+#define DESC_HDR_MODE0_AESU_CBC		__constant_cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_DEU_CBC		__constant_cpu_to_be32(0x00400000)
+#define DESC_HDR_MODE0_DEU_3DES		__constant_cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_MDEU_INIT	__constant_cpu_to_be32(0x01000000)
+#define DESC_HDR_MODE0_MDEU_HMAC	__constant_cpu_to_be32(0x00800000)
+#define DESC_HDR_MODE0_MDEU_PAD		__constant_cpu_to_be32(0x00400000)
+#define DESC_HDR_MODE0_MDEU_MD5		__constant_cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_MDEU_SHA256	__constant_cpu_to_be32(0x00100000)
+#define DESC_HDR_MODE0_MDEU_SHA1	__constant_cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE0_MDEU_MD5_HMAC	(DESC_HDR_MODE0_MDEU_MD5 | \
+					 DESC_HDR_MODE0_MDEU_HMAC)
+#define DESC_HDR_MODE0_MDEU_SHA256_HMAC	(DESC_HDR_MODE0_MDEU_SHA256 | \
+					 DESC_HDR_MODE0_MDEU_HMAC)
+#define DESC_HDR_MODE0_MDEU_SHA1_HMAC	(DESC_HDR_MODE0_MDEU_SHA1 | \
+					 DESC_HDR_MODE0_MDEU_HMAC)
+
+/* secondary execution unit select (SEL1) */
+#define DESC_HDR_SEL1_MASK		__constant_cpu_to_be32(0x000f0000)
+#define DESC_HDR_SEL1_MDEUA		__constant_cpu_to_be32(0x00030000)
+#define DESC_HDR_SEL1_MDEUB		__constant_cpu_to_be32(0x000b0000)
+#define DESC_HDR_SEL1_CRCU		__constant_cpu_to_be32(0x00080000)
+
+/* secondary execution unit mode (MODE1) and derivatives */
+#define DESC_HDR_MODE1_MDEU_INIT	__constant_cpu_to_be32(0x00001000)
+#define DESC_HDR_MODE1_MDEU_HMAC	__constant_cpu_to_be32(0x00000800)
+#define DESC_HDR_MODE1_MDEU_PAD		__constant_cpu_to_be32(0x00000400)
+#define DESC_HDR_MODE1_MDEU_MD5		__constant_cpu_to_be32(0x00000200)
+#define DESC_HDR_MODE1_MDEU_SHA256	__constant_cpu_to_be32(0x00000100)
+#define DESC_HDR_MODE1_MDEU_SHA1	__constant_cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE1_MDEU_MD5_HMAC	(DESC_HDR_MODE1_MDEU_MD5 | \
+					 DESC_HDR_MODE1_MDEU_HMAC)
+#define DESC_HDR_MODE1_MDEU_SHA256_HMAC	(DESC_HDR_MODE1_MDEU_SHA256 | \
+					 DESC_HDR_MODE1_MDEU_HMAC)
+#define DESC_HDR_MODE1_MDEU_SHA1_HMAC	(DESC_HDR_MODE1_MDEU_SHA1 | \
+					 DESC_HDR_MODE1_MDEU_HMAC)
+
+/* direction of overall data flow (DIR) */
+#define DESC_HDR_DIR_INBOUND		__constant_cpu_to_be32(0x00000002)
+
+/* request done notification (DN) */
+#define DESC_HDR_DONE_NOTIFY		__constant_cpu_to_be32(0x00000001)
+
+/* descriptor types */
+#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP		__constant_cpu_to_be32(0 << 3)
+#define DESC_HDR_TYPE_IPSEC_ESP			__constant_cpu_to_be32(1 << 3)
+#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU	__constant_cpu_to_be32(2 << 3)
+#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU	__constant_cpu_to_be32(4 << 3)
+
+/* link table extent field bits */
+#define DESC_PTR_LNKTBL_JUMP			0x80
+#define DESC_PTR_LNKTBL_RETURN			0x02
+#define DESC_PTR_LNKTBL_NEXT			0x01
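Since every 64-bit SEC register is split across a pair of 32-bit offsets, with the _LO half holding bits 32-63 (the least significant word in this big-endian bit numbering), drivers access them as two 32-bit MMIO operations. A minimal sketch of a 64-bit register write under that convention; the helper name, the `priv->reg` base pointer and the choice of iowrite32be accessor are illustrative assumptions, not code from this patch:

	static void wr_reg64(void __iomem *reg, u64 val)
	{
		/* bits 0-31 (most significant word) go at the base offset */
		iowrite32be((u32)(val >> 32), reg);
		/* bits 32-63 (least significant word) go at the +4 _LO offset */
		iowrite32be((u32)val, reg + 4);
	}

For example, wr_reg64(priv->reg + TALITOS_CDPR(ch), desc_dma) would point channel ch at a descriptor.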
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index dc2cec6127d1..ebb9e51deb0c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -26,6 +26,16 @@ config EDD_OFF
 	  kernel. Say N if you want EDD enabled by default. EDD can be dynamically set
 	  using the kernel parameter 'edd={on|skipmbr|off}'.
 
+config FIRMWARE_MEMMAP
+	bool "Add firmware-provided memory map to sysfs" if EMBEDDED
+	default (X86_64 || X86_32)
+	help
+	  Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
+	  That memory map is used, for example, by kexec to set up the parameter
+	  area for the next kernel, but it can also be used for debugging purposes.
+
+	  See also Documentation/ABI/testing/sysfs-firmware-memmap.
+
 config EFI_VARS
 	tristate "EFI Variable Support via sysfs"
 	depends on EFI
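Given the memmap.c implementation added below, each firmware map entry becomes one numbered kobject directory carrying three read-only attributes, so the resulting tree looks roughly like this (the "System RAM" type string is an example value, not exhaustive):

	/sys/firmware/memmap/0/start	hex start address, e.g. 0x0
	/sys/firmware/memmap/0/end	hex end address (inclusive)
	/sys/firmware/memmap/0/type	type string, e.g. "System RAM"
	/sys/firmware/memmap/1/...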
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 4c9147154df8..1c3c17343dbe 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_DCDBAS) += dcdbas.o
 obj-$(CONFIG_DMIID)		+= dmi-id.o
 obj-$(CONFIG_ISCSI_IBFT_FIND)	+= iscsi_ibft_find.o
 obj-$(CONFIG_ISCSI_IBFT)	+= iscsi_ibft.o
+obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index c5e3ed7e903b..455575be3560 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -8,6 +8,11 @@
 #include <linux/slab.h>
 #include <asm/dmi.h>
 
+/*
+ * DMI stands for "Desktop Management Interface". It is part of, and an
+ * antecedent to, SMBIOS, which stands for System Management BIOS.
+ * See further: http://www.dmtf.org/standards
+ */
 static char dmi_empty_string[] = "        ";
 
 static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
new file mode 100644
index 000000000000..e23399c7f773
--- /dev/null
+++ b/drivers/firmware/memmap.c
@@ -0,0 +1,205 @@
+/*
+ * linux/drivers/firmware/memmap.c
+ * Copyright (C) 2008 SUSE LINUX Products GmbH
+ * by Bernhard Walle <bwalle@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/firmware-map.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+
+/*
+ * Data types ------------------------------------------------------------------
+ */
+
+/*
+ * Firmware map entry. Because firmware memory maps are flat and not
+ * hierarchical, it's ok to organise them in a linked list. Unlike the
+ * resource tree, no parent information is necessary.
+ */
+struct firmware_map_entry {
+	resource_size_t		start;	/* start of the memory range */
+	resource_size_t		end;	/* end of the memory range (incl.) */
+	const char		*type;	/* type of the memory range */
+	struct list_head	list;	/* entry for the linked list */
+	struct kobject		kobj;	/* kobject for each entry */
+};
+
+/*
+ * Forward declarations --------------------------------------------------------
+ */
+static ssize_t memmap_attr_show(struct kobject *kobj,
+				struct attribute *attr, char *buf);
+static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
+static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
+static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
+
+/*
+ * Static data -----------------------------------------------------------------
+ */
+
+struct memmap_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
+};
+
+struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
+struct memmap_attribute memmap_end_attr   = __ATTR_RO(end);
+struct memmap_attribute memmap_type_attr  = __ATTR_RO(type);
+
+/*
+ * These are default attributes that are added for every memmap entry.
+ */
+static struct attribute *def_attrs[] = {
+	&memmap_start_attr.attr,
+	&memmap_end_attr.attr,
+	&memmap_type_attr.attr,
+	NULL
+};
+
+static struct sysfs_ops memmap_attr_ops = {
+	.show = memmap_attr_show,
+};
+
+static struct kobj_type memmap_ktype = {
+	.sysfs_ops	= &memmap_attr_ops,
+	.default_attrs	= def_attrs,
+};
+
+/*
+ * Registration functions ------------------------------------------------------
+ */
+
+/*
+ * Firmware memory map entries
+ */
+static LIST_HEAD(map_entries);
+
+/**
+ * Common implementation of firmware_map_add() and firmware_map_add_early()
+ * which expects a pre-allocated struct firmware_map_entry.
+ *
+ * @start: Start of the memory range.
+ * @end:   End of the memory range (inclusive).
+ * @type:  Type of the memory range.
+ * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
+ *         entry.
+ */
+static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
+				  const char *type,
+				  struct firmware_map_entry *entry)
+{
+	BUG_ON(start > end);
+
+	entry->start = start;
+	entry->end = end;
+	entry->type = type;
+	INIT_LIST_HEAD(&entry->list);
+	kobject_init(&entry->kobj, &memmap_ktype);
+
+	list_add_tail(&entry->list, &map_entries);
+
+	return 0;
+}
+
+/*
+ * See <linux/firmware-map.h> for documentation.
+ */
+int firmware_map_add(resource_size_t start, resource_size_t end,
+		     const char *type)
+{
+	struct firmware_map_entry *entry;
+
+	entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+	WARN_ON(!entry);
+	if (!entry)
+		return -ENOMEM;
+
+	return firmware_map_add_entry(start, end, type, entry);
+}
+
+/*
+ * See <linux/firmware-map.h> for documentation.
+ */
+int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
+				  const char *type)
+{
+	struct firmware_map_entry *entry;
+
+	entry = alloc_bootmem_low(sizeof(struct firmware_map_entry));
+	WARN_ON(!entry);
+	if (!entry)
+		return -ENOMEM;
+
+	return firmware_map_add_entry(start, end, type, entry);
+}
+
+/*
+ * Sysfs functions -------------------------------------------------------------
+ */
+
+static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			(unsigned long long)entry->start);
+}
+
+static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			(unsigned long long)entry->end);
+}
+
+static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
+}
+
+#define to_memmap_attr(_attr) container_of(_attr, struct memmap_attribute, attr)
+#define to_memmap_entry(obj) container_of(obj, struct firmware_map_entry, kobj)
+
+static ssize_t memmap_attr_show(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	struct firmware_map_entry *entry = to_memmap_entry(kobj);
+	struct memmap_attribute *memmap_attr = to_memmap_attr(attr);
+
+	return memmap_attr->show(entry, buf);
+}
+
+/*
+ * Creates the /sys/firmware/memmap kset and adds the entries in the
+ * map_entries list to sysfs. Note that firmware_map_add() and
+ * firmware_map_add_early() must be called before this late_initcall runs.
+ */
+static int __init memmap_init(void)
+{
+	int i = 0;
+	struct firmware_map_entry *entry;
+	struct kset *memmap_kset;
+
+	memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
+	WARN_ON(!memmap_kset);
+	if (!memmap_kset)
+		return -ENOMEM;
+
+	list_for_each_entry(entry, &map_entries, list) {
+		entry->kobj.kset = memmap_kset;
+		kobject_add(&entry->kobj, NULL, "%d", i++);
+	}
+
+	return 0;
+}
+late_initcall(memmap_init);
+
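Nothing in this file scans the firmware tables itself; the architecture boot code is expected to populate the list via firmware_map_add_early() while bootmem is still the only allocator. A hedged sketch of what such a caller could look like for an e820-style table; the e820 structure fields and the e820_type_to_string() helper are assumptions for illustration, not part of this patch:

	void __init firmware_map_from_e820(void)
	{
		int i;

		/* end address is inclusive, matching the (incl.) comment
		 * on struct firmware_map_entry */
		for (i = 0; i < e820.nr_map; i++)
			firmware_map_add_early(e820.map[i].addr,
					       e820.map[i].addr + e820.map[i].size - 1,
					       e820_type_to_string(e820.map[i].type));
	}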
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
new file mode 100644
index 000000000000..de566cf0414c
--- /dev/null
+++ b/drivers/gpu/Makefile
@@ -0,0 +1 @@
+obj-y			+= drm/
diff --git a/drivers/char/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 610d6fd5bb50..610d6fd5bb50 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
new file mode 100644
index 000000000000..e9f9a97ae00a
--- /dev/null
+++ b/drivers/gpu/drm/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+drm-y :=	drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
+		drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
+		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
+
+drm-$(CONFIG_COMPAT) += drm_ioc32.o
+
+obj-$(CONFIG_DRM)	+= drm.o
+obj-$(CONFIG_DRM_TDFX)	+= tdfx/
+obj-$(CONFIG_DRM_R128)	+= r128/
+obj-$(CONFIG_DRM_RADEON)+= radeon/
+obj-$(CONFIG_DRM_MGA)	+= mga/
+obj-$(CONFIG_DRM_I810)	+= i810/
+obj-$(CONFIG_DRM_I830)	+= i830/
+obj-$(CONFIG_DRM_I915)	+= i915/
+obj-$(CONFIG_DRM_SIS)	+= sis/
+obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VIA)	+=via/
+
diff --git a/drivers/char/drm/README.drm b/drivers/gpu/drm/README.drm
index b5b332722581..b5b332722581 100644
--- a/drivers/char/drm/README.drm
+++ b/drivers/gpu/drm/README.drm
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index c533d0c9ec61..c533d0c9ec61 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index aefa5ac4c0b1..aefa5ac4c0b1 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
diff --git a/drivers/char/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index a73462723d2d..a73462723d2d 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bde64b84166e..bde64b84166e 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
diff --git a/drivers/char/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index d505f695421f..d505f695421f 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
diff --git a/drivers/char/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 7a8e2fba4678..7a8e2fba4678 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
index 1839c57663c5..1839c57663c5 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/gpu/drm/drm_drawable.c
diff --git a/drivers/char/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 564138714bb5..564138714bb5 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
diff --git a/drivers/char/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 851a53f1acce..851a53f1acce 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 33160673a7b7..33160673a7b7 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 90f5a8d9bdcb..90f5a8d9bdcb 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 16829fb3089d..16829fb3089d 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
diff --git a/drivers/char/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 089c015c01d1..089c015c01d1 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
diff --git a/drivers/char/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 0998723cde79..0998723cde79 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
diff --git a/drivers/char/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 845081b44f63..845081b44f63 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
diff --git a/drivers/char/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index dcff9e9b52e3..dcff9e9b52e3 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
diff --git a/drivers/char/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index b55d5bc6ea61..b55d5bc6ea61 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
diff --git a/drivers/char/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 93b1e0475c93..93b1e0475c93 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index b2b0f3d41714..b2b0f3d41714 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
diff --git a/drivers/char/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
index 926f146390ce..926f146390ce 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/gpu/drm/drm_sman.c
diff --git a/drivers/char/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c2f584f3b46c..c2f584f3b46c 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index af211a0ef179..af211a0ef179 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
diff --git a/drivers/char/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c234c6f24a8d..c234c6f24a8d 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
diff --git a/drivers/gpu/drm/i810/Makefile b/drivers/gpu/drm/i810/Makefile
new file mode 100644
index 000000000000..43844ecafcc5
--- /dev/null
+++ b/drivers/gpu/drm/i810/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i810-y := i810_drv.o i810_dma.o
+
+obj-$(CONFIG_DRM_I810)	+= i810.o
diff --git a/drivers/char/drm/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index e5de8ea41544..e5de8ea41544 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
diff --git a/drivers/char/drm/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fabb9a817966..fabb9a817966 100644
--- a/drivers/char/drm/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
diff --git a/drivers/char/drm/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 0118849a5672..0118849a5672 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
new file mode 100644
index 000000000000..c642ee0b238c
--- /dev/null
+++ b/drivers/gpu/drm/i830/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i830-y := i830_drv.o i830_dma.o i830_irq.o
+
+obj-$(CONFIG_DRM_I830)	+= i830.o
diff --git a/drivers/char/drm/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index a86ab30b4620..a86ab30b4620 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
diff --git a/drivers/char/drm/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 389597e4a623..389597e4a623 100644
--- a/drivers/char/drm/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
diff --git a/drivers/char/drm/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
index b5bf8cc0fdaa..b5bf8cc0fdaa 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/gpu/drm/i830/i830_drv.h
diff --git a/drivers/char/drm/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
index 91ec2bb497e9..91ec2bb497e9 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/gpu/drm/i830/i830_irq.c
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
new file mode 100644
index 000000000000..a9e60464df74
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+
+i915-$(CONFIG_COMPAT) += i915_ioc32.o
+
+obj-$(CONFIG_DRM_I915)	+= i915.o
diff --git a/drivers/char/drm/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 88974342933c..88974342933c 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
diff --git a/drivers/char/drm/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 93aed1c38bd2..93aed1c38bd2 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
diff --git a/drivers/char/drm/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d7326d92a237..d7326d92a237 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
diff --git a/drivers/char/drm/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 1fe68a251b75..1fe68a251b75 100644
--- a/drivers/char/drm/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
diff --git a/drivers/char/drm/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df036118b8b1..df036118b8b1 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
diff --git a/drivers/char/drm/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
index 6126a60dc9cb..6126a60dc9cb 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/gpu/drm/i915/i915_mem.c
diff --git a/drivers/gpu/drm/mga/Makefile b/drivers/gpu/drm/mga/Makefile
new file mode 100644
index 000000000000..60684785c203
--- /dev/null
+++ b/drivers/gpu/drm/mga/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+mga-y := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
+
+mga-$(CONFIG_COMPAT) += mga_ioc32.o
+
+obj-$(CONFIG_DRM_MGA)	+= mga.o
+
diff --git a/drivers/char/drm/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index c1d12dbfa8d8..c1d12dbfa8d8 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
diff --git a/drivers/char/drm/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 5572939fc7d1..5572939fc7d1 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
diff --git a/drivers/char/drm/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index f6ebd24bd587..f6ebd24bd587 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
diff --git a/drivers/char/drm/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 30d00478ddee..30d00478ddee 100644
--- a/drivers/char/drm/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
diff --git a/drivers/char/drm/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 9302cb8f0f83..9302cb8f0f83 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
diff --git a/drivers/char/drm/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index d3f8aade07b3..d3f8aade07b3 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
diff --git a/drivers/char/drm/mga_ucode.h b/drivers/gpu/drm/mga/mga_ucode.h
index b611e27470e1..b611e27470e1 100644
--- a/drivers/char/drm/mga_ucode.h
+++ b/drivers/gpu/drm/mga/mga_ucode.h
diff --git a/drivers/char/drm/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 651b93c8ab5d..651b93c8ab5d 100644
--- a/drivers/char/drm/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
new file mode 100644
index 000000000000..1cc72ae3a880
--- /dev/null
+++ b/drivers/gpu/drm/r128/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o
+
+r128-$(CONFIG_COMPAT) += r128_ioc32.o
+
+obj-$(CONFIG_DRM_R128)	+= r128.o
diff --git a/drivers/char/drm/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index c31afbde62e7..c31afbde62e7 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
diff --git a/drivers/char/drm/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 6108e7587e12..6108e7587e12 100644
--- a/drivers/char/drm/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
diff --git a/drivers/char/drm/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 011105e51ac6..011105e51ac6 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
diff --git a/drivers/char/drm/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index d3cb676eee84..d3cb676eee84 100644
--- a/drivers/char/drm/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
diff --git a/drivers/char/drm/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index c76fdca7662d..c76fdca7662d 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
diff --git a/drivers/char/drm/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 51a9afce7b9b..51a9afce7b9b 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
new file mode 100644
index 000000000000..feb521ebc393
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
+
+radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+
+obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 702df45320f7..702df45320f7 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
diff --git a/drivers/char/drm/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index a6802f26afc4..a6802f26afc4 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index e53158f0ecb5..e53158f0ecb5 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 349ac3d3b848..349ac3d3b848 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 3f0eca957aa7..3f0eca957aa7 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
diff --git a/drivers/char/drm/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index 56decda2a71f..56decda2a71f 100644
--- a/drivers/char/drm/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index ee40d197deb7..ee40d197deb7 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index 4af5286a36fb..4af5286a36fb 100644
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
diff --git a/drivers/char/drm/radeon_microcode.h b/drivers/gpu/drm/radeon/radeon_microcode.h
index a348c9e7db1c..a348c9e7db1c 100644
--- a/drivers/char/drm/radeon_microcode.h
+++ b/drivers/gpu/drm/radeon/radeon_microcode.h
diff --git a/drivers/char/drm/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 11c146b49211..11c146b49211 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
diff --git a/drivers/gpu/drm/savage/Makefile b/drivers/gpu/drm/savage/Makefile
new file mode 100644
index 000000000000..d8f84ac7bb26
--- /dev/null
+++ b/drivers/gpu/drm/savage/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y = -Iinclude/drm
+savage-y := savage_drv.o savage_bci.o savage_state.o
+
+obj-$(CONFIG_DRM_SAVAGE)+= savage.o
+
diff --git a/drivers/char/drm/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index d465b2f9c1cd..d465b2f9c1cd 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
diff --git a/drivers/char/drm/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index eee52aa92a7c..eee52aa92a7c 100644
--- a/drivers/char/drm/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
diff --git a/drivers/char/drm/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index df2aac6636f7..df2aac6636f7 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
diff --git a/drivers/char/drm/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 5f6238fdf1fa..5f6238fdf1fa 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
diff --git a/drivers/gpu/drm/sis/Makefile b/drivers/gpu/drm/sis/Makefile
new file mode 100644
index 000000000000..441c061c3ad0
--- /dev/null
+++ b/drivers/gpu/drm/sis/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y = -Iinclude/drm
+sis-y := sis_drv.o sis_mm.o
+
+obj-$(CONFIG_DRM_SIS)	+= sis.o
+
+
diff --git a/drivers/char/drm/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 7dacc64e9b56..7dacc64e9b56 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
diff --git a/drivers/char/drm/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index ef940bad63f7..ef940bad63f7 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
diff --git a/drivers/char/drm/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index b3878770fce1..b3878770fce1 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
diff --git a/drivers/gpu/drm/tdfx/Makefile b/drivers/gpu/drm/tdfx/Makefile
new file mode 100644
index 000000000000..0379f294b32a
--- /dev/null
+++ b/drivers/gpu/drm/tdfx/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+tdfx-y := tdfx_drv.o
+
+obj-$(CONFIG_DRM_TDFX)	+= tdfx.o
diff --git a/drivers/char/drm/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 012ff2e356b2..012ff2e356b2 100644
--- a/drivers/char/drm/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
diff --git a/drivers/char/drm/tdfx_drv.h b/drivers/gpu/drm/tdfx/tdfx_drv.h
index 84204ec1b046..84204ec1b046 100644
--- a/drivers/char/drm/tdfx_drv.h
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.h
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
new file mode 100644
index 000000000000..d59e258e2c13
--- /dev/null
+++ b/drivers/gpu/drm/via/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+via-y := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
+
+obj-$(CONFIG_DRM_VIA)	+=via.o
diff --git a/drivers/char/drm/via_3d_reg.h b/drivers/gpu/drm/via/via_3d_reg.h
index 462375d543b9..462375d543b9 100644
--- a/drivers/char/drm/via_3d_reg.h
+++ b/drivers/gpu/drm/via/via_3d_reg.h
diff --git a/drivers/char/drm/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 7a339dba6a69..7a339dba6a69 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 409e00afdd07..409e00afdd07 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
index 7408a547a036..7408a547a036 100644
--- a/drivers/char/drm/via_dmablit.h
+++ b/drivers/gpu/drm/via/via_dmablit.h
diff --git a/drivers/char/drm/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 80c01cdfa37d..80c01cdfa37d 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
diff --git a/drivers/char/drm/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 2daae81874cd..2daae81874cd 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
diff --git a/drivers/char/drm/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index c6bb978a1106..c6bb978a1106 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
diff --git a/drivers/char/drm/via_map.c b/drivers/gpu/drm/via/via_map.c
index a967556be014..a967556be014 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
diff --git a/drivers/char/drm/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index e64094916e4f..e64094916e4f 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
diff --git a/drivers/char/drm/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 46a579198747..46a579198747 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
diff --git a/drivers/char/drm/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
index d6f8214b69f5..d6f8214b69f5 100644
--- a/drivers/char/drm/via_verifier.h
+++ b/drivers/gpu/drm/via/via_verifier.h
diff --git a/drivers/char/drm/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6ec04ac12459..6ec04ac12459 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 3381424d70a1..8dbf4d9b6447 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -63,11 +63,11 @@ MODULE_LICENSE("Dual MPL/GPL");
 
 #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
 
-#ifdef PCMCIA_DEBUG
-INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#ifdef CONFIG_PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, 0);
 #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
-static char *version =
-"ide-cs.c 1.3 2002/10/26 05:45:31 (David Hinds)";
+/*static char *version =
+"ide-cs.c 1.3 2002/10/26 05:45:31 (David Hinds)";*/
 #else
 #define DEBUG(n, args...)
 #endif
@@ -375,7 +375,7 @@ failed:
 
 ======================================================================*/
 
-void ide_release(struct pcmcia_device *link)
+static void ide_release(struct pcmcia_device *link)
 {
 	ide_info_t *info = link->priv;
 	ide_hwif_t *hwif = info->hwif;
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index 0f47f4697cdf..9ce3b3baf3a2 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -66,6 +66,9 @@ static irqreturn_t input_handler(int rq, void *dev_id)
 	case XENKBD_TYPE_MOTION:
 		input_report_rel(dev, REL_X, event->motion.rel_x);
 		input_report_rel(dev, REL_Y, event->motion.rel_y);
+		if (event->motion.rel_z)
+			input_report_rel(dev, REL_WHEEL,
+					 -event->motion.rel_z);
 		break;
 	case XENKBD_TYPE_KEY:
 		dev = NULL;
@@ -84,6 +87,9 @@ static irqreturn_t input_handler(int rq, void *dev_id)
 	case XENKBD_TYPE_POS:
 		input_report_abs(dev, ABS_X, event->pos.abs_x);
 		input_report_abs(dev, ABS_Y, event->pos.abs_y);
+		if (event->pos.rel_z)
+			input_report_rel(dev, REL_WHEEL,
+					 -event->pos.rel_z);
 		break;
 	}
 	if (dev)
@@ -152,7 +158,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
 	ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
 	for (i = BTN_LEFT; i <= BTN_TASK; i++)
 		set_bit(i, ptr->keybit);
-	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
+	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
 	input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
 	input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
 
@@ -294,6 +300,16 @@ InitWait:
 	 */
 	if (dev->state != XenbusStateConnected)
 		goto InitWait; /* no InitWait seen yet, fudge it */
+
+	/* Set input abs params to match backend screen res */
+	if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+			 "width", "%d", &val) > 0)
+		input_set_abs_params(info->ptr, ABS_X, 0, val, 0, 0);
+
+	if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+			 "height", "%d", &val) > 0)
+		input_set_abs_params(info->ptr, ABS_Y, 0, val, 0, 0);
+
 		break;
 
 	case XenbusStateClosing:
@@ -337,4 +353,6 @@ static void __exit xenkbd_cleanup(void)
 module_init(xenkbd_init);
 module_exit(xenkbd_cleanup);
 
+MODULE_DESCRIPTION("Xen virtual keyboard/pointer device frontend");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vkbd");
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 6b8dbb9ba73b..76f2b36881c3 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
 config LGUEST
 	tristate "Linux hypervisor example code"
-	depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !(X86_VISWS || X86_VOYAGER)
+	depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !X86_VOYAGER
 	select HVC_DRIVER
 	---help---
 	  This is a very simple module which allows you to run
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 005bd045d2eb..5faefeaf6790 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -136,7 +136,6 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
  * first step in the migration to the kernel types. pte_pfn is already defined
  * in the kernel. */
 #define pgd_flags(x)	(pgd_val(x) & ~PAGE_MASK)
-#define pte_flags(x)	(pte_val(x) & ~PAGE_MASK)
 #define pgd_pfn(x)	(pgd_val(x) >> PAGE_SHIFT)
 
 /* interrupts_and_traps.c: */
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 10748240cb2f..6a866d7c8ae5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -50,17 +50,19 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 /**
  * linear_mergeable_bvec -- tell bio layer if two requests can be merged
  * @q: request queue
- * @bio: the buffer head that's been built up so far
+ * @bvm: properties of new bio
  * @biovec: the request that could be merged to it.
  *
  * Return amount of bytes we can take at this offset
  */
-static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q,
+				 struct bvec_merge_data *bvm,
+				 struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *dev0;
-	unsigned long maxsectors, bio_sectors = bio->bi_size >> 9;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 
 	dev0 = which_dev(mddev, sector);
 	maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1));
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 914c04ddec7c..bcbb82594a19 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -241,18 +241,20 @@ static int create_strip_zones (mddev_t *mddev)
 /**
  * raid0_mergeable_bvec -- tell bio layer if two requests can be merged
  * @q: request queue
- * @bio: the buffer head that's been built up so far
+ * @bvm: properties of new bio
  * @biovec: the request that could be merged to it.
  *
  * Return amount of bytes we can accept at this offset
 */
-static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q,
+				struct bvec_merge_data *bvm,
+				struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bvm->bi_size >> 9;
 
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
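The merge arithmetic is identical across these mergeable_bvec hooks (linear above, raid10 and raid5 below): chunk_sectors is a power of two, so (sector & (chunk_sectors - 1)) is the offset into the current chunk, and max is the space left before the chunk boundary, converted to bytes by the << 9. For example, with 64KiB chunks (chunk_sectors = 128), a bio starting 112 sectors into a chunk and already carrying 8 sectors gets max = (128 - (112 + 8)) << 9 = 4096, i.e. room for exactly one more 4KiB page before the boundary.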
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a71277b640ab..22bb2b1b886d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -439,26 +439,27 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
 /**
  * raid10_mergeable_bvec -- tell bio layer if two requests can be merged
  * @q: request queue
- * @bio: the buffer head that's been built up so far
+ * @bvm: properties of new bio
  * @biovec: the request that could be merged to it.
  *
  * Return amount of bytes we can accept at this offset
  * If near_copies == raid_disk, there are no striping issues,
  * but in that case, the function isn't called at all.
  */
-static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
-				 struct bio_vec *bio_vec)
+static int raid10_mergeable_bvec(struct request_queue *q,
+				 struct bvec_merge_data *bvm,
+				 struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bvm->bi_size >> 9;
 
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
-	if (max <= bio_vec->bv_len && bio_sectors == 0)
-		return bio_vec->bv_len;
+	if (max <= biovec->bv_len && bio_sectors == 0)
+		return biovec->bv_len;
 	else
 		return max;
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3b27df52456b..9ce7154845c6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3314,15 +3314,17 @@ static int raid5_congested(void *data, int bits)
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q,
+				struct bvec_merge_data *bvm,
+				struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bvm->bi_size >> 9;
 
-	if (bio_data_dir(bio) == WRITE)
+	if ((bvm->bi_rw & 1) == WRITE)
 		return biovec->bv_len; /* always allow writes to be mergeable */
 
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
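raid5 has to open-code the direction test because it now receives a struct bvec_merge_data rather than a bio, and bio_data_dir() takes a bio. The expression relies on the same identity bio_data_dir() is defined with in <linux/bio.h>: bit 0 of bi_rw encodes the direction. Written out as a sketch (bvm_data_dir is a hypothetical helper, shown only to make the identity explicit):

	#define bvm_data_dir(bvm)	((bvm)->bi_rw & 1)	/* 0 = READ, 1 = WRITE */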
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
index 0d5ce03cdff2..5b5a14dab3d3 100644
--- a/drivers/misc/atmel_pwm.c
+++ b/drivers/misc/atmel_pwm.c
@@ -332,7 +332,7 @@ static int __init pwm_probe(struct platform_device *pdev)
 	p->base = ioremap(r->start, r->end - r->start + 1);
 	if (!p->base)
 		goto fail;
-	p->clk = clk_get(&pdev->dev, "mck");
+	p->clk = clk_get(&pdev->dev, "pwm_clk");
 	if (IS_ERR(p->clk)) {
 		status = PTR_ERR(p->clk);
 		p->clk = NULL;
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 4a79b187b568..5c29872184e6 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -130,10 +130,6 @@ typedef struct partition_t {
 	u_int16_t		DataUnits;
 	u_int32_t		BlocksPerUnit;
 	erase_unit_header_t	header;
-#if 0
-	region_info_t		region;
-	memory_handle_t		handle;
-#endif
 } partition_t;
 
 /* Partition state flags */
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 1912d968718b..0cc31675aeb9 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -498,17 +498,14 @@ static int pcmciamtd_config(struct pcmcia_device *link)
 	int i;
 	config_info_t t;
 	static char *probes[] = { "jedec_probe", "cfi_probe" };
-	cisinfo_t cisinfo;
 	int new_name = 0;
 
 	DEBUG(3, "link=0x%p", link);
 
 	DEBUG(2, "Validating CIS");
-	ret = pcmcia_validate_cis(link, &cisinfo);
+	ret = pcmcia_validate_cis(link, NULL);
 	if(ret != CS_SUCCESS) {
 		cs_error(link, GetTupleData, ret);
-	} else {
-		DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains);
 	}
 
 	card_settings(dev, link, &new_name);
@@ -563,9 +560,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
 	DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
 
 	/* Get write protect status */
-	CS_CHECK(GetStatus, pcmcia_get_status(link, &status));
-	DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx",
-	      status.CardState, (unsigned long)link->win);
+	DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win);
 	dev->win_base = ioremap(req.Base, req.Size);
 	if(!dev->win_base) {
 		err("ioremap(%lu, %u) failed", req.Base, req.Size);
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 92dccd43bdca..0a5745a854c7 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1277,8 +1277,45 @@ static int __exit macb_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int macb_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct net_device *netdev = platform_get_drvdata(pdev);
+	struct macb *bp = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+#ifndef CONFIG_ARCH_AT91
+	clk_disable(bp->hclk);
+#endif
+	clk_disable(bp->pclk);
+
+	return 0;
+}
+
+static int macb_resume(struct platform_device *pdev)
+{
+	struct net_device *netdev = platform_get_drvdata(pdev);
+	struct macb *bp = netdev_priv(netdev);
+
+	clk_enable(bp->pclk);
+#ifndef CONFIG_ARCH_AT91
+	clk_enable(bp->hclk);
+#endif
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+#else
+#define macb_suspend	NULL
+#define macb_resume	NULL
+#endif
+
 static struct platform_driver macb_driver = {
 	.remove		= __exit_p(macb_remove),
+	.suspend	= macb_suspend,
+	.resume		= macb_resume,
 	.driver		= {
 		.name		= "macb",
 		.owner	= THIS_MODULE,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d26f69b0184f..ef671d1a3bf0 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1324,7 +1324,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 		goto fail;
 	}
 
-	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
+	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 	if (!txs) {
 		err = -ENOMEM;
 		xenbus_dev_fatal(dev, err, "allocating tx ring page");
@@ -1340,7 +1340,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 	}
 
 	info->tx_ring_ref = err;
-	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
+	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 	if (!rxs) {
 		err = -ENOMEM;
 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 66c0fd21894b..bb0642318a95 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1637,12 +1637,43 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
 }
 
 #ifdef CONFIG_DMAR_GFX_WA
-extern int arch_get_ram_range(int slot, u64 *addr, u64 *size);
+struct iommu_prepare_data {
+	struct pci_dev *pdev;
+	int ret;
+};
+
+static int __init iommu_prepare_work_fn(unsigned long start_pfn,
+					unsigned long end_pfn, void *datax)
+{
+	struct iommu_prepare_data *data;
+
+	data = (struct iommu_prepare_data *)datax;
+
+	data->ret = iommu_prepare_identity_map(data->pdev,
+				start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+	return data->ret;
+
+}
+
+static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
+{
+	int nid;
+	struct iommu_prepare_data data;
+
+	data.pdev = pdev;
+	data.ret = 0;
+
+	for_each_online_node(nid) {
+		work_with_active_regions(nid, iommu_prepare_work_fn, &data);
+		if (data.ret)
+			return data.ret;
+	}
+	return data.ret;
+}
+
 static void __init iommu_prepare_gfx_mapping(void)
 {
 	struct pci_dev *pdev = NULL;
-	u64 base, size;
-	int slot;
 	int ret;
 
 	for_each_pci_dev(pdev) {
@@ -1651,17 +1682,9 @@ static void __init iommu_prepare_gfx_mapping(void)
1651 continue; 1682 continue;
1652 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n", 1683 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1653 pci_name(pdev)); 1684 pci_name(pdev));
1654 slot = arch_get_ram_range(0, &base, &size); 1685 ret = iommu_prepare_with_active_regions(pdev);
1655 while (slot >= 0) { 1686 if (ret)
1656 ret = iommu_prepare_identity_map(pdev, 1687 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1657 base, base + size);
1658 if (ret)
1659 goto error;
1660 slot = arch_get_ram_range(slot, &base, &size);
1661 }
1662 continue;
1663error:
1664 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1665 } 1688 }
1666} 1689}
1667#endif 1690#endif
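This intel-iommu rewrite replaces the x86-only arch_get_ram_range() loop with work_with_active_regions(), which calls back once per active RAM range on a node. Because the callback only receives (start_pfn, end_pfn, void *), the device pointer and the result travel in a small context struct, the usual C stand-in for a closure. A sketch of that idiom with illustrative names (walk_ctx, map_one_range, map_all_ram); the u64 casts are this sketch's own precaution against a 32-bit shift overflow, the patch itself shifts unsigned longs:

#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/pci.h>

int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end);
					/* defined earlier in intel-iommu.c */

struct walk_ctx {
	struct pci_dev *pdev;	/* input for every callback invocation */
	int ret;		/* output: first error wins */
};

static int __init map_one_range(unsigned long start_pfn,
				unsigned long end_pfn, void *cookie)
{
	struct walk_ctx *ctx = cookie;

	ctx->ret = iommu_prepare_identity_map(ctx->pdev,
			(u64)start_pfn << PAGE_SHIFT,
			(u64)end_pfn << PAGE_SHIFT);
	return ctx->ret;	/* non-zero stops the region walk early */
}

static int __init map_all_ram(struct pci_dev *pdev)
{
	struct walk_ctx ctx = { .pdev = pdev, .ret = 0 };
	int nid;

	for_each_online_node(nid) {
		work_with_active_regions(nid, map_one_range, &ctx);
		if (ctx.ret)
			break;
	}
	return ctx.ret;
}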
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 1b0eb5aaf650..e45402adac3f 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -263,6 +263,13 @@ config OMAP_CF
263 Say Y here to support the CompactFlash controller on OMAP. 263 Say Y here to support the CompactFlash controller on OMAP.
264 Note that this doesn't support "True IDE" mode. 264 Note that this doesn't support "True IDE" mode.
265 265
266config BFIN_CFPCMCIA
267 tristate "Blackfin CompactFlash PCMCIA Driver"
268 depends on PCMCIA && BLACKFIN
269 help
270 Say Y here to support the CompactFlash PCMCIA driver for Blackfin.
271
272
266config AT91_CF 273config AT91_CF
267 tristate "AT91 CompactFlash Controller" 274 tristate "AT91 CompactFlash Controller"
268 depends on PCMCIA && ARCH_AT91RM9200 275 depends on PCMCIA && ARCH_AT91RM9200
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 6f6478ba7174..85c6cc931f97 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
36obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o 36obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
37obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o 37obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
38obj-$(CONFIG_OMAP_CF) += omap_cf.o 38obj-$(CONFIG_OMAP_CF) += omap_cf.o
39obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o
39obj-$(CONFIG_AT91_CF) += at91_cf.o 40obj-$(CONFIG_AT91_CF) += at91_cf.o
40obj-$(CONFIG_ELECTRA_CF) += electra_cf.o 41obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
41 42
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
index 1e467bb54077..a53ef5902518 100644
--- a/drivers/pcmcia/au1000_generic.h
+++ b/drivers/pcmcia/au1000_generic.h
@@ -26,7 +26,6 @@
26#include <pcmcia/cs_types.h> 26#include <pcmcia/cs_types.h>
27#include <pcmcia/cs.h> 27#include <pcmcia/cs.h>
28#include <pcmcia/ss.h> 28#include <pcmcia/ss.h>
29#include <pcmcia/bulkmem.h>
30#include <pcmcia/cistpl.h> 29#include <pcmcia/cistpl.h>
31#include "cs_internal.h" 30#include "cs_internal.h"
32 31
@@ -34,9 +33,9 @@
34#define AU1000_PCMCIA_IO_SPEED (255) 33#define AU1000_PCMCIA_IO_SPEED (255)
35#define AU1000_PCMCIA_MEM_SPEED (300) 34#define AU1000_PCMCIA_MEM_SPEED (300)
36 35
37#define AU1X_SOCK0_IO 0xF00000000 36#define AU1X_SOCK0_IO 0xF00000000ULL
38#define AU1X_SOCK0_PHYS_ATTR 0xF40000000 37#define AU1X_SOCK0_PHYS_ATTR 0xF40000000ULL
39#define AU1X_SOCK0_PHYS_MEM 0xF80000000 38#define AU1X_SOCK0_PHYS_MEM 0xF80000000ULL
40/* pseudo 32 bit phys addresses, which get fixed up to the 39/* pseudo 32 bit phys addresses, which get fixed up to the
41 * real 36 bit address in fixup_bigphys_addr() */ 40 * real 36 bit address in fixup_bigphys_addr() */
42#define AU1X_SOCK0_PSEUDO_PHYS_ATTR 0xF4000000 41#define AU1X_SOCK0_PSEUDO_PHYS_ATTR 0xF4000000
@@ -45,16 +44,20 @@
45/* pcmcia socket 1 needs external glue logic so the memory map 44/* pcmcia socket 1 needs external glue logic so the memory map
46 * differs from board to board. 45 * differs from board to board.
47 */ 46 */
48#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100) || defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1550) || defined(CONFIG_MIPS_PB1200) 47#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100) || \
49#define AU1X_SOCK1_IO 0xF08000000 48 defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1550) || \
50#define AU1X_SOCK1_PHYS_ATTR 0xF48000000 49 defined(CONFIG_MIPS_PB1200)
51#define AU1X_SOCK1_PHYS_MEM 0xF88000000 50#define AU1X_SOCK1_IO 0xF08000000ULL
51#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL
52#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL
52#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4800000 53#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4800000
53#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8800000 54#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8800000
54#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) || defined(CONFIG_MIPS_DB1500) || defined(CONFIG_MIPS_DB1550) || defined(CONFIG_MIPS_DB1200) 55#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) || \
55#define AU1X_SOCK1_IO 0xF04000000 56 defined(CONFIG_MIPS_DB1500) || defined(CONFIG_MIPS_DB1550) || \
56#define AU1X_SOCK1_PHYS_ATTR 0xF44000000 57 defined(CONFIG_MIPS_DB1200)
57#define AU1X_SOCK1_PHYS_MEM 0xF84000000 58#define AU1X_SOCK1_IO 0xF04000000ULL
59#define AU1X_SOCK1_PHYS_ATTR 0xF44000000ULL
60#define AU1X_SOCK1_PHYS_MEM 0xF84000000ULL
58#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4400000 61#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4400000
59#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8400000 62#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8400000
60#endif 63#endif
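These socket windows live above 4 GiB in the Alchemy 36-bit physical map, so the constants do not fit in a 32-bit unsigned long; without a suffix the compiler only widens them as an extension and warns about it on 32-bit builds. The ULL suffix pins each constant to unsigned long long explicitly. A tiny sketch (SOCK0_IO and sock0_io_base are illustrative names):

#include <linux/types.h>

#define SOCK0_IO	0xF00000000ULL	/* 36-bit physical address, explicit */

static inline u64 sock0_io_base(void)
{
	return SOCK0_IO;	/* carried in a u64, never truncated */
}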
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index 157e41423a0a..aa1cd4d3aa29 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -35,7 +35,6 @@
35#include <pcmcia/cs_types.h> 35#include <pcmcia/cs_types.h>
36#include <pcmcia/cs.h> 36#include <pcmcia/cs.h>
37#include <pcmcia/ss.h> 37#include <pcmcia/ss.h>
38#include <pcmcia/bulkmem.h>
39#include <pcmcia/cistpl.h> 38#include <pcmcia/cistpl.h>
40#include <pcmcia/bus_ops.h> 39#include <pcmcia/bus_ops.h>
41#include "cs_internal.h" 40#include "cs_internal.h"
diff --git a/drivers/pcmcia/au1000_xxs1500.c b/drivers/pcmcia/au1000_xxs1500.c
index c78ed5347510..8a9b18cee847 100644
--- a/drivers/pcmcia/au1000_xxs1500.c
+++ b/drivers/pcmcia/au1000_xxs1500.c
@@ -39,7 +39,6 @@
39#include <pcmcia/cs_types.h> 39#include <pcmcia/cs_types.h>
40#include <pcmcia/cs.h> 40#include <pcmcia/cs.h>
41#include <pcmcia/ss.h> 41#include <pcmcia/ss.h>
42#include <pcmcia/bulkmem.h>
43#include <pcmcia/cistpl.h> 42#include <pcmcia/cistpl.h>
44#include <pcmcia/bus_ops.h> 43#include <pcmcia/bus_ops.h>
45#include "cs_internal.h" 44#include "cs_internal.h"
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
new file mode 100644
index 000000000000..bb7338863fb9
--- /dev/null
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -0,0 +1,339 @@
1/*
2 * file: drivers/pcmcia/bfin_cf_pcmcia.c
3 *
4 * based on: drivers/pcmcia/omap_cf.c
5 * omap_cf.c -- OMAP 16xx CompactFlash controller driver
6 *
7 * Copyright (c) 2005 David Brownell
8 * Copyright (c) 2006-2008 Michael Hennerich Analog Devices Inc.
9 *
10 * bugs: enter bugs at http://blackfin.uclinux.org/
11 *
12 * this program is free software; you can redistribute it and/or modify
13 * it under the terms of the gnu general public license as published by
14 * the free software foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * this program is distributed in the hope that it will be useful,
18 * but without any warranty; without even the implied warranty of
19 * merchantability or fitness for a particular purpose. see the
20 * gnu general public license for more details.
21 *
22 * you should have received a copy of the gnu general public license
23 * along with this program; see the file copying.
24 * if not, write to the free software foundation,
25 * 59 temple place - suite 330, boston, ma 02111-1307, usa.
26 */
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/sched.h>
31#include <linux/platform_device.h>
32#include <linux/errno.h>
33#include <linux/init.h>
34#include <linux/delay.h>
35#include <linux/interrupt.h>
36#include <linux/irq.h>
37#include <linux/io.h>
38
39#include <pcmcia/ss.h>
40#include <pcmcia/cisreg.h>
41#include <asm/gpio.h>
42
43#define SZ_1K 0x00000400
44#define SZ_8K 0x00002000
45#define SZ_2K (2 * SZ_1K)
46
47#define POLL_INTERVAL (2 * HZ)
48
49#define CF_ATASEL_ENA 0x20311802 /* Inverts RESET */
50#define CF_ATASEL_DIS 0x20311800
51
52#define bfin_cf_present(pfx) (gpio_get_value(pfx))
53
54/*--------------------------------------------------------------------------*/
55
56static const char driver_name[] = "bfin_cf_pcmcia";
57
58struct bfin_cf_socket {
59 struct pcmcia_socket socket;
60
61 struct timer_list timer;
62 unsigned present:1;
63 unsigned active:1;
64
65 struct platform_device *pdev;
66 unsigned long phys_cf_io;
67 unsigned long phys_cf_attr;
68 u_int irq;
69 u_short cd_pfx;
70};
71
72/*--------------------------------------------------------------------------*/
73static int bfin_cf_reset(void)
74{
75 outw(0, CF_ATASEL_ENA);
76 mdelay(200);
77 outw(0, CF_ATASEL_DIS);
78
79 return 0;
80}
81
82static int bfin_cf_ss_init(struct pcmcia_socket *s)
83{
84 return 0;
85}
86
87/* the timer is primarily to kick this socket's pccardd */
88static void bfin_cf_timer(unsigned long _cf)
89{
90 struct bfin_cf_socket *cf = (void *)_cf;
91 unsigned short present = bfin_cf_present(cf->cd_pfx);
92
93 if (present != cf->present) {
94 cf->present = present;
95 dev_dbg(&cf->pdev->dev, ": card %s\n",
96 present ? "present" : "gone");
97 pcmcia_parse_events(&cf->socket, SS_DETECT);
98 }
99
100 if (cf->active)
101 mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
102}
103
104static int bfin_cf_get_status(struct pcmcia_socket *s, u_int *sp)
105{
106 struct bfin_cf_socket *cf;
107
108 if (!sp)
109 return -EINVAL;
110
111 cf = container_of(s, struct bfin_cf_socket, socket);
112
113 if (bfin_cf_present(cf->cd_pfx)) {
114 *sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
115 s->irq.AssignedIRQ = 0;
116 s->pci_irq = cf->irq;
117
118 } else
119 *sp = 0;
120 return 0;
121}
122
123static int
124bfin_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
125{
126
127 struct bfin_cf_socket *cf;
128 cf = container_of(sock, struct bfin_cf_socket, socket);
129
130 switch (s->Vcc) {
131 case 0:
132 case 33:
133 break;
134 case 50:
135 break;
136 default:
137 return -EINVAL;
138 }
139
140 if (s->flags & SS_RESET) {
141 disable_irq(cf->irq);
142 bfin_cf_reset();
143 enable_irq(cf->irq);
144 }
145
146 dev_dbg(&cf->pdev->dev, ": Vcc %d, io_irq %d, flags %04x csc %04x\n",
147 s->Vcc, s->io_irq, s->flags, s->csc_mask);
148
149 return 0;
150}
151
152static int bfin_cf_ss_suspend(struct pcmcia_socket *s)
153{
154 return bfin_cf_set_socket(s, &dead_socket);
155}
156
157/* regions are 2K each: mem, attrib, io (and reserved-for-ide) */
158
159static int bfin_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
160{
161 struct bfin_cf_socket *cf;
162
163 cf = container_of(s, struct bfin_cf_socket, socket);
164 io->flags &= MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT;
165 io->start = cf->phys_cf_io;
166 io->stop = io->start + SZ_2K - 1;
167 return 0;
168}
169
170static int
171bfin_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
172{
173 struct bfin_cf_socket *cf;
174
175 if (map->card_start)
176 return -EINVAL;
177 cf = container_of(s, struct bfin_cf_socket, socket);
178 map->static_start = cf->phys_cf_io;
179 map->flags &= MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT;
180 if (map->flags & MAP_ATTRIB)
181 map->static_start = cf->phys_cf_attr;
182
183 return 0;
184}
185
186static struct pccard_operations bfin_cf_ops = {
187 .init = bfin_cf_ss_init,
188 .suspend = bfin_cf_ss_suspend,
189 .get_status = bfin_cf_get_status,
190 .set_socket = bfin_cf_set_socket,
191 .set_io_map = bfin_cf_set_io_map,
192 .set_mem_map = bfin_cf_set_mem_map,
193};
194
195/*--------------------------------------------------------------------------*/
196
197static int __devinit bfin_cf_probe(struct platform_device *pdev)
198{
199 struct bfin_cf_socket *cf;
200 struct resource *io_mem, *attr_mem;
201 int irq;
202 unsigned short cd_pfx;
203 int status = 0;
204
205 dev_info(&pdev->dev, "Blackfin CompactFlash/PCMCIA Socket Driver\n");
206
207 irq = platform_get_irq(pdev, 0);
208 if (irq <= 0)
209 return -EINVAL;
210
211 cd_pfx = platform_get_irq(pdev, 1); /* Card Detect GPIO PIN */
212
213 if (gpio_request(cd_pfx, "pcmcia: CD")) {
214 dev_err(&pdev->dev,
215 "Failed ro request Card Detect GPIO_%d\n",
216 cd_pfx);
217 return -EBUSY;
218 }
219 gpio_direction_input(cd_pfx);
220
221 cf = kzalloc(sizeof *cf, GFP_KERNEL);
222 if (!cf) {
223 gpio_free(cd_pfx);
224 return -ENOMEM;
225 }
226
227 cf->cd_pfx = cd_pfx;
228
229 setup_timer(&cf->timer, bfin_cf_timer, (unsigned long)cf);
230
231 cf->pdev = pdev;
232 platform_set_drvdata(pdev, cf);
233
234 cf->irq = irq;
235 cf->socket.pci_irq = irq;
236
237 set_irq_type(irq, IRQF_TRIGGER_LOW);
238
239 io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
240 attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
241
242 if (!io_mem || !attr_mem)
243 goto fail0;
244
245 cf->phys_cf_io = io_mem->start;
246 cf->phys_cf_attr = attr_mem->start;
247
248 /* pcmcia layer only remaps "real" memory */
249 cf->socket.io_offset = (unsigned long)
250 ioremap(cf->phys_cf_io, SZ_2K);
251
252 if (!cf->socket.io_offset)
253 goto fail0;
254
255 dev_info(&pdev->dev, ": on irq %d\n", irq);
256
257 dev_dbg(&pdev->dev, ": %s\n",
258 bfin_cf_present(cf->cd_pfx) ? "present" : "(not present)");
259
260 cf->socket.owner = THIS_MODULE;
261 cf->socket.dev.parent = &pdev->dev;
262 cf->socket.ops = &bfin_cf_ops;
263 cf->socket.resource_ops = &pccard_static_ops;
264 cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
265 | SS_CAP_MEM_ALIGN;
266 cf->socket.map_size = SZ_2K;
267
268 status = pcmcia_register_socket(&cf->socket);
269 if (status < 0)
270 goto fail2;
271
272 cf->active = 1;
273 mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
274 return 0;
275
276fail2:
277 iounmap((void __iomem *)cf->socket.io_offset);
278 release_mem_region(cf->phys_cf_io, SZ_8K);
279
280fail0:
281 gpio_free(cf->cd_pfx);
282 kfree(cf);
283 platform_set_drvdata(pdev, NULL);
284
285 return status;
286}
287
288static int __devexit bfin_cf_remove(struct platform_device *pdev)
289{
290 struct bfin_cf_socket *cf = platform_get_drvdata(pdev);
291
292 gpio_free(cf->cd_pfx);
293 cf->active = 0;
294 pcmcia_unregister_socket(&cf->socket);
295 del_timer_sync(&cf->timer);
296 iounmap((void __iomem *)cf->socket.io_offset);
297 release_mem_region(cf->phys_cf_io, SZ_8K);
298 platform_set_drvdata(pdev, NULL);
299 kfree(cf);
300 return 0;
301}
302
303static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
304{
305 return pcmcia_socket_dev_suspend(&pdev->dev, mesg);
306}
307
308static int bfin_cf_resume(struct platform_device *pdev)
309{
310 return pcmcia_socket_dev_resume(&pdev->dev);
311}
312
313static struct platform_driver bfin_cf_driver = {
314 .driver = {
315 .name = (char *)driver_name,
316 .owner = THIS_MODULE,
317 },
318 .probe = bfin_cf_probe,
319 .remove = __devexit_p(bfin_cf_remove),
320 .suspend = bfin_cf_suspend,
321 .resume = bfin_cf_resume,
322};
323
324static int __init bfin_cf_init(void)
325{
326 return platform_driver_register(&bfin_cf_driver);
327}
328
329static void __exit bfin_cf_exit(void)
330{
331 platform_driver_unregister(&bfin_cf_driver);
332}
333
334module_init(bfin_cf_init);
335module_exit(bfin_cf_exit);
336
337MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
338MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
339MODULE_LICENSE("GPL");
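bfin_cf_pcmcia has no card-detect interrupt, so the whole driver hangs off the self-rearming timer above: bfin_cf_timer() samples the card-detect GPIO every two seconds, reports changes to the core with pcmcia_parse_events(..., SS_DETECT), and re-arms itself only while cf->active is set, which is what lets bfin_cf_remove() clear the flag and then call del_timer_sync() safely. The 2.6-era skeleton of that pattern, with illustrative names (poller, poll_fn):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define POLL_INTERVAL	(2 * HZ)

struct poller {
	struct timer_list timer;
	unsigned active:1;	/* cleared before del_timer_sync() at teardown */
};

static void poll_fn(unsigned long data)
{
	struct poller *p = (struct poller *)data;

	/* ... sample the GPIO and report a change to the consumer ... */

	if (p->active)
		mod_timer(&p->timer, jiffies + POLL_INTERVAL);	/* re-arm */
}

static void poller_start(struct poller *p)
{
	setup_timer(&p->timer, poll_fn, (unsigned long)p);
	p->active = 1;
	mod_timer(&p->timer, jiffies + POLL_INTERVAL);
}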
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index fb2f38dc92c5..911ca0e8dfc2 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -30,11 +30,9 @@
30#include <asm/irq.h> 30#include <asm/irq.h>
31#include <asm/io.h> 31#include <asm/io.h>
32 32
33#define IN_CARD_SERVICES
34#include <pcmcia/cs_types.h> 33#include <pcmcia/cs_types.h>
35#include <pcmcia/ss.h> 34#include <pcmcia/ss.h>
36#include <pcmcia/cs.h> 35#include <pcmcia/cs.h>
37#include <pcmcia/bulkmem.h>
38#include <pcmcia/cistpl.h> 36#include <pcmcia/cistpl.h>
39#include "cs_internal.h" 37#include "cs_internal.h"
40 38
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 36379535f9da..9fcff0c33619 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -30,7 +30,6 @@
30#include <pcmcia/cs_types.h> 30#include <pcmcia/cs_types.h>
31#include <pcmcia/ss.h> 31#include <pcmcia/ss.h>
32#include <pcmcia/cs.h> 32#include <pcmcia/cs.h>
33#include <pcmcia/bulkmem.h>
34#include <pcmcia/cisreg.h> 33#include <pcmcia/cisreg.h>
35#include <pcmcia/cistpl.h> 34#include <pcmcia/cistpl.h>
36#include "cs_internal.h" 35#include "cs_internal.h"
@@ -1439,10 +1438,11 @@ EXPORT_SYMBOL(pccard_read_tuple);
1439 1438
1440======================================================================*/ 1439======================================================================*/
1441 1440
1442int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_t *info) 1441int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned int *info)
1443{ 1442{
1444 tuple_t *tuple; 1443 tuple_t *tuple;
1445 cisparse_t *p; 1444 cisparse_t *p;
1445 unsigned int count = 0;
1446 int ret, reserved, dev_ok = 0, ident_ok = 0; 1446 int ret, reserved, dev_ok = 0, ident_ok = 0;
1447 1447
1448 if (!s) 1448 if (!s)
@@ -1457,7 +1457,7 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_
1457 return CS_OUT_OF_RESOURCE; 1457 return CS_OUT_OF_RESOURCE;
1458 } 1458 }
1459 1459
1460 info->Chains = reserved = 0; 1460 count = reserved = 0;
1461 tuple->DesiredTuple = RETURN_FIRST_TUPLE; 1461 tuple->DesiredTuple = RETURN_FIRST_TUPLE;
1462 tuple->Attributes = TUPLE_RETURN_COMMON; 1462 tuple->Attributes = TUPLE_RETURN_COMMON;
1463 ret = pccard_get_first_tuple(s, function, tuple); 1463 ret = pccard_get_first_tuple(s, function, tuple);
@@ -1482,7 +1482,7 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_
1482 if (!dev_ok && !ident_ok) 1482 if (!dev_ok && !ident_ok)
1483 goto done; 1483 goto done;
1484 1484
1485 for (info->Chains = 1; info->Chains < MAX_TUPLES; info->Chains++) { 1485 for (count = 1; count < MAX_TUPLES; count++) {
1486 ret = pccard_get_next_tuple(s, function, tuple); 1486 ret = pccard_get_next_tuple(s, function, tuple);
1487 if (ret != CS_SUCCESS) break; 1487 if (ret != CS_SUCCESS) break;
1488 if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) || 1488 if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) ||
@@ -1490,11 +1490,13 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_
1490 ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff))) 1490 ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff)))
1491 reserved++; 1491 reserved++;
1492 } 1492 }
1493 if ((info->Chains == MAX_TUPLES) || (reserved > 5) || 1493 if ((count == MAX_TUPLES) || (reserved > 5) ||
1494 ((!dev_ok || !ident_ok) && (info->Chains > 10))) 1494 ((!dev_ok || !ident_ok) && (count > 10)))
1495 info->Chains = 0; 1495 count = 0;
1496 1496
1497done: 1497done:
1498 if (info)
1499 *info = count;
1498 kfree(tuple); 1500 kfree(tuple);
1499 kfree(p); 1501 kfree(p);
1500 return CS_SUCCESS; 1502 return CS_SUCCESS;
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 29276bd28295..d1207393fc3e 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -32,11 +32,9 @@
32#include <asm/system.h> 32#include <asm/system.h>
33#include <asm/irq.h> 33#include <asm/irq.h>
34 34
35#define IN_CARD_SERVICES
36#include <pcmcia/cs_types.h> 35#include <pcmcia/cs_types.h>
37#include <pcmcia/ss.h> 36#include <pcmcia/ss.h>
38#include <pcmcia/cs.h> 37#include <pcmcia/cs.h>
39#include <pcmcia/bulkmem.h>
40#include <pcmcia/cistpl.h> 38#include <pcmcia/cistpl.h>
41#include <pcmcia/cisreg.h> 39#include <pcmcia/cisreg.h>
42#include <pcmcia/ds.h> 40#include <pcmcia/ds.h>
@@ -238,7 +236,6 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
238 236
239 init_completion(&socket->socket_released); 237 init_completion(&socket->socket_released);
240 init_completion(&socket->thread_done); 238 init_completion(&socket->thread_done);
241 init_waitqueue_head(&socket->thread_wait);
242 mutex_init(&socket->skt_mutex); 239 mutex_init(&socket->skt_mutex);
243 spin_lock_init(&socket->thread_lock); 240 spin_lock_init(&socket->thread_lock);
244 241
@@ -278,10 +275,9 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
278 275
279 cs_dbg(socket, 0, "pcmcia_unregister_socket(0x%p)\n", socket->ops); 276 cs_dbg(socket, 0, "pcmcia_unregister_socket(0x%p)\n", socket->ops);
280 277
281 if (socket->thread) { 278 if (socket->thread)
282 wake_up(&socket->thread_wait);
283 kthread_stop(socket->thread); 279 kthread_stop(socket->thread);
284 } 280
285 release_cis_mem(socket); 281 release_cis_mem(socket);
286 282
287 /* remove from our own list */ 283 /* remove from our own list */
@@ -635,7 +631,6 @@ static void socket_detect_change(struct pcmcia_socket *skt)
635static int pccardd(void *__skt) 631static int pccardd(void *__skt)
636{ 632{
637 struct pcmcia_socket *skt = __skt; 633 struct pcmcia_socket *skt = __skt;
638 DECLARE_WAITQUEUE(wait, current);
639 int ret; 634 int ret;
640 635
641 skt->thread = current; 636 skt->thread = current;
@@ -656,7 +651,6 @@ static int pccardd(void *__skt)
656 if (ret) 651 if (ret)
657 dev_warn(&skt->dev, "err %d adding socket attributes\n", ret); 652 dev_warn(&skt->dev, "err %d adding socket attributes\n", ret);
658 653
659 add_wait_queue(&skt->thread_wait, &wait);
660 complete(&skt->thread_done); 654 complete(&skt->thread_done);
661 655
662 set_freezable(); 656 set_freezable();
@@ -694,8 +688,6 @@ static int pccardd(void *__skt)
694 /* make sure we are running before we exit */ 688 /* make sure we are running before we exit */
695 set_current_state(TASK_RUNNING); 689 set_current_state(TASK_RUNNING);
696 690
697 remove_wait_queue(&skt->thread_wait, &wait);
698
699 /* remove from the device core */ 691 /* remove from the device core */
700 pccard_sysfs_remove_socket(&skt->dev); 692 pccard_sysfs_remove_socket(&skt->dev);
701 device_unregister(&skt->dev); 693 device_unregister(&skt->dev);
@@ -716,7 +708,7 @@ void pcmcia_parse_events(struct pcmcia_socket *s, u_int events)
716 s->thread_events |= events; 708 s->thread_events |= events;
717 spin_unlock_irqrestore(&s->thread_lock, flags); 709 spin_unlock_irqrestore(&s->thread_lock, flags);
718 710
719 wake_up(&s->thread_wait); 711 wake_up_process(s->thread);
720 } 712 }
721} /* pcmcia_parse_events */ 713} /* pcmcia_parse_events */
722EXPORT_SYMBOL(pcmcia_parse_events); 714EXPORT_SYMBOL(pcmcia_parse_events);
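The cs.c change retires the thread_wait waitqueue: since pccardd is a proper kthread, an event poster can wake the task directly with wake_up_process(), and the thread sleeps by setting its own state and calling schedule(). The subtle part is the ordering, shown in this hedged skeleton (events_pending stands in for skt->thread_events, which the real code guards with thread_lock):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static unsigned int events_pending;	/* illustrative shared flag */

static int worker(void *arg)
{
	set_freezable();
	while (!kthread_should_stop()) {
		/* Declare ourselves asleep BEFORE testing the condition: a
		 * wake_up_process() racing in after the test then just makes
		 * schedule() return immediately, so no event is ever lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!events_pending)
			schedule();
		__set_current_state(TASK_RUNNING);

		try_to_freeze();
		/* ... consume and clear events_pending under a lock ... */
	}
	return 0;
}

/* Producer side, replacing wake_up(&some_waitqueue): */
static void post_event(struct task_struct *task)
{
	events_pending = 1;
	wake_up_process(task);
}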
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index e7d5d141f24d..63dc1a28bda2 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -26,18 +26,6 @@
26#define CLIENT_WIN_REQ(i) (0x1<<(i)) 26#define CLIENT_WIN_REQ(i) (0x1<<(i))
27#define CLIENT_CARDBUS 0x8000 27#define CLIENT_CARDBUS 0x8000
28 28
29#define REGION_MAGIC 0xE3C9
30typedef struct region_t {
31 u_short region_magic;
32 u_short state;
33 dev_info_t dev_info;
34 struct pcmcia_device *mtd;
35 u_int MediaID;
36 region_info_t info;
37} region_t;
38
39#define REGION_STALE 0x01
40
41/* Each card function gets one of these guys */ 29/* Each card function gets one of these guys */
42typedef struct config_t { 30typedef struct config_t {
43 struct kref ref; 31 struct kref ref;
@@ -130,7 +118,6 @@ extern struct list_head pcmcia_socket_list;
130int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle, int idx, win_req_t *req); 118int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle, int idx, win_req_t *req);
131int pccard_get_configuration_info(struct pcmcia_socket *s, struct pcmcia_device *p_dev, config_info_t *config); 119int pccard_get_configuration_info(struct pcmcia_socket *s, struct pcmcia_device *p_dev, config_info_t *config);
132int pccard_reset_card(struct pcmcia_socket *skt); 120int pccard_reset_card(struct pcmcia_socket *skt);
133int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev, cs_status_t *status);
134 121
135 122
136struct pcmcia_callback{ 123struct pcmcia_callback{
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index e40775443d04..4174d9656e35 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -25,7 +25,6 @@
25#include <linux/kref.h> 25#include <linux/kref.h>
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27 27
28#define IN_CARD_SERVICES
29#include <pcmcia/cs_types.h> 28#include <pcmcia/cs_types.h>
30#include <pcmcia/cs.h> 29#include <pcmcia/cs.h>
31#include <pcmcia/cistpl.h> 30#include <pcmcia/cistpl.h>
@@ -741,9 +740,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
741 740
742static int pcmcia_card_add(struct pcmcia_socket *s) 741static int pcmcia_card_add(struct pcmcia_socket *s)
743{ 742{
744 cisinfo_t cisinfo;
745 cistpl_longlink_mfc_t mfc; 743 cistpl_longlink_mfc_t mfc;
746 unsigned int no_funcs, i; 744 unsigned int no_funcs, i, no_chains;
747 int ret = 0; 745 int ret = 0;
748 746
749 if (!(s->resource_setup_done)) { 747 if (!(s->resource_setup_done)) {
@@ -757,8 +755,8 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
757 return -EAGAIN; /* try again, but later... */ 755 return -EAGAIN; /* try again, but later... */
758 } 756 }
759 757
760 ret = pccard_validate_cis(s, BIND_FN_ALL, &cisinfo); 758 ret = pccard_validate_cis(s, BIND_FN_ALL, &no_chains);
761 if (ret || !cisinfo.Chains) { 759 if (ret || !no_chains) {
762 ds_dbg(0, "invalid CIS or invalid resources\n"); 760 ds_dbg(0, "invalid CIS or invalid resources\n");
763 return -ENODEV; 761 return -ENODEV;
764 } 762 }
@@ -852,7 +850,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
852{ 850{
853 struct pcmcia_socket *s = dev->socket; 851 struct pcmcia_socket *s = dev->socket;
854 const struct firmware *fw; 852 const struct firmware *fw;
855 char path[20]; 853 char path[FIRMWARE_NAME_MAX];
856 int ret = -ENOMEM; 854 int ret = -ENOMEM;
857 int no_funcs; 855 int no_funcs;
858 int old_funcs; 856 int old_funcs;
@@ -864,7 +862,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
864 862
865 ds_dbg(1, "trying to load CIS file %s\n", filename); 863 ds_dbg(1, "trying to load CIS file %s\n", filename);
866 864
867 if (strlen(filename) > 14) { 865 if (strlen(filename) > (FIRMWARE_NAME_MAX - 1)) {
868 printk(KERN_WARNING "pcmcia: CIS filename is too long [%s]\n", 866 printk(KERN_WARNING "pcmcia: CIS filename is too long [%s]\n",
869 filename); 867 filename);
870 return -EINVAL; 868 return -EINVAL;
diff --git a/drivers/pcmcia/hd64465_ss.c b/drivers/pcmcia/hd64465_ss.c
index f2e810f53c81..fb2bc1fb015d 100644
--- a/drivers/pcmcia/hd64465_ss.c
+++ b/drivers/pcmcia/hd64465_ss.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: hd64465_ss.c,v 1.7 2003/07/06 14:42:50 lethal Exp $
3 *
4 * Device driver for the PCMCIA controller module of the 2 * Device driver for the PCMCIA controller module of the
5 * Hitachi HD64465 handheld companion chip. 3 * Hitachi HD64465 handheld companion chip.
6 * 4 *
@@ -48,7 +46,6 @@
48#include <pcmcia/cistpl.h> 46#include <pcmcia/cistpl.h>
49#include <pcmcia/ds.h> 47#include <pcmcia/ds.h>
50#include <pcmcia/ss.h> 48#include <pcmcia/ss.h>
51#include <pcmcia/bulkmem.h>
52#include "cs_internal.h" 49#include "cs_internal.h"
53 50
54#define MODNAME "hd64465_ss" 51#define MODNAME "hd64465_ss"
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index e13618656ff7..46561face128 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Author: Arjan Van De Ven <arjanv@redhat.com> 6 * Author: Arjan Van De Ven <arjanv@redhat.com>
7 * Loosely based on i82365.c from the pcmcia-cs package 7 * Loosely based on i82365.c from the pcmcia-cs package
8 *
9 * $Id: i82092aa.c,v 1.2 2001/10/23 14:43:34 arjanv Exp $
10 */ 8 */
11 9
12#include <linux/kernel.h> 10#include <linux/kernel.h>
diff --git a/drivers/pcmcia/i82092aa.h b/drivers/pcmcia/i82092aa.h
index b0d453303c5d..8836d393ad02 100644
--- a/drivers/pcmcia/i82092aa.h
+++ b/drivers/pcmcia/i82092aa.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/interrupt.h> 4#include <linux/interrupt.h>
5 5
6/* $Id: i82092aa.h,v 1.1.1.1 2001/09/19 14:53:15 dwmw2 Exp $ */
7
8/* Debugging defines */ 6/* Debugging defines */
9#ifdef NOTRACE 7#ifdef NOTRACE
10#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) 8#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 32a2ab119798..68f6b2702bc4 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1263,7 +1263,7 @@ static int __init init_i82365(void)
1263 1263
1264 ret = driver_register(&i82365_driver); 1264 ret = driver_register(&i82365_driver);
1265 if (ret) 1265 if (ret)
1266 return ret; 1266 goto err_out;
1267 1267
1268 i82365_device = platform_device_alloc("i82365", 0); 1268 i82365_device = platform_device_alloc("i82365", 0);
1269 if (i82365_device) { 1269 if (i82365_device) {
@@ -1273,10 +1273,8 @@ static int __init init_i82365(void)
1273 } else 1273 } else
1274 ret = -ENOMEM; 1274 ret = -ENOMEM;
1275 1275
1276 if (ret) { 1276 if (ret)
1277 driver_unregister(&i82365_driver); 1277 goto err_driver_unregister;
1278 return ret;
1279 }
1280 1278
1281 printk(KERN_INFO "Intel ISA PCIC probe: "); 1279 printk(KERN_INFO "Intel ISA PCIC probe: ");
1282 sockets = 0; 1280 sockets = 0;
@@ -1285,16 +1283,17 @@ static int __init init_i82365(void)
1285 1283
1286 if (sockets == 0) { 1284 if (sockets == 0) {
1287 printk("not found.\n"); 1285 printk("not found.\n");
1288 platform_device_unregister(i82365_device); 1286 ret = -ENODEV;
1289 release_region(i365_base, 2); 1287 goto err_dev_unregister;
1290 driver_unregister(&i82365_driver);
1291 return -ENODEV;
1292 } 1288 }
1293 1289
1294 /* Set up interrupt handler(s) */ 1290 /* Set up interrupt handler(s) */
1295 if (grab_irq != 0) 1291 if (grab_irq != 0)
1296 request_irq(cs_irq, pcic_interrupt, 0, "i82365", pcic_interrupt); 1292 ret = request_irq(cs_irq, pcic_interrupt, 0, "i82365", pcic_interrupt);
1297 1293
1294 if (ret)
1295 goto err_socket_release;
1296
1298 /* register sockets with the pcmcia core */ 1297 /* register sockets with the pcmcia core */
1299 for (i = 0; i < sockets; i++) { 1298 for (i = 0; i < sockets; i++) {
1300 socket[i].socket.dev.parent = &i82365_device->dev; 1299 socket[i].socket.dev.parent = &i82365_device->dev;
@@ -1324,7 +1323,23 @@ static int __init init_i82365(void)
1324 } 1323 }
1325 1324
1326 return 0; 1325 return 0;
1327 1326err_socket_release:
1327 for (i = 0; i < sockets; i++) {
1328 /* Turn off all interrupt sources! */
1329 i365_set(i, I365_CSCINT, 0);
1330 release_region(socket[i].ioaddr, 2);
1331 }
1332err_dev_unregister:
1333 platform_device_unregister(i82365_device);
1334 release_region(i365_base, 2);
1335#ifdef CONFIG_PNP
1336 if (i82365_pnpdev)
1337 pnp_disable_dev(i82365_pnpdev);
1338#endif
1339err_driver_unregister:
1340 driver_unregister(&i82365_driver);
1341err_out:
1342 return ret;
1328} /* init_i82365 */ 1343} /* init_i82365 */
1329 1344
1330static void __exit exit_i82365(void) 1345static void __exit exit_i82365(void)
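The init_i82365() rework is the standard goto-unwind conversion: every acquisition gets a matching label, failures jump to the label that releases everything obtained so far in reverse order, and as a side effect the previously ignored request_irq() return value is now checked. The shape of the pattern, reduced to two steps with placeholder names (mydrv_driver, mydrv_isr, MYDRV_IRQ):

#include <linux/device.h>
#include <linux/interrupt.h>

static int __init mydrv_init(void)
{
	int ret;

	ret = driver_register(&mydrv_driver);
	if (ret)
		goto err_out;

	ret = request_irq(MYDRV_IRQ, mydrv_isr, 0, "mydrv", NULL);
	if (ret)
		goto err_driver_unregister;

	return 0;

err_driver_unregister:
	driver_unregister(&mydrv_driver);	/* undo in reverse order */
err_out:
	return ret;
}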
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index ac70d2cb7dd4..13a5fbd50a07 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * m8xx_pcmcia.c - Linux PCMCIA socket driver for the mpc8xx series. 2 * m8xx_pcmcia.c - Linux PCMCIA socket driver for the mpc8xx series.
3 * 3 *
4 * (C) 1999-2000 Magnus Damm <damm@bitsmart.com> 4 * (C) 1999-2000 Magnus Damm <damm@opensource.se>
5 * (C) 2001-2002 Montavista Software, Inc. 5 * (C) 2001-2002 Montavista Software, Inc.
6 * <mlocke@mvista.com> 6 * <mlocke@mvista.com>
7 * 7 *
@@ -60,7 +60,6 @@
60#include <asm/of_device.h> 60#include <asm/of_device.h>
61#include <asm/of_platform.h> 61#include <asm/of_platform.h>
62 62
63#include <pcmcia/version.h>
64#include <pcmcia/cs_types.h> 63#include <pcmcia/cs_types.h>
65#include <pcmcia/cs.h> 64#include <pcmcia/cs.h>
66#include <pcmcia/ss.h> 65#include <pcmcia/ss.h>
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 138396ef5be9..419f97fc9a62 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -30,10 +30,10 @@
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32 32
33#define IN_CARD_SERVICES
34#include <pcmcia/cs_types.h> 33#include <pcmcia/cs_types.h>
35#include <pcmcia/cs.h> 34#include <pcmcia/cs.h>
36#include <pcmcia/cistpl.h> 35#include <pcmcia/cistpl.h>
36#include <pcmcia/cisreg.h>
37#include <pcmcia/ds.h> 37#include <pcmcia/ds.h>
38#include <pcmcia/ss.h> 38#include <pcmcia/ss.h>
39 39
@@ -139,6 +139,154 @@ static int proc_read_drivers(char *buf, char **start, off_t pos,
139} 139}
140#endif 140#endif
141 141
142
143#ifdef CONFIG_PCMCIA_PROBE
144
145static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
146{
147 int irq;
148 u32 mask;
149
150 irq = adj->resource.irq.IRQ;
151 if ((irq < 0) || (irq > 15))
152 return CS_BAD_IRQ;
153
154 if (adj->Action != REMOVE_MANAGED_RESOURCE)
155 return 0;
156
157 mask = 1 << irq;
158
159 if (!(s->irq_mask & mask))
160 return 0;
161
162 s->irq_mask &= ~mask;
163
164 return 0;
165}
166
167#else
168
169static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) {
170 return CS_SUCCESS;
171}
172
173#endif
174
175static int pcmcia_adjust_resource_info(adjust_t *adj)
176{
177 struct pcmcia_socket *s;
178 int ret = CS_UNSUPPORTED_FUNCTION;
179 unsigned long flags;
180
181 down_read(&pcmcia_socket_list_rwsem);
182 list_for_each_entry(s, &pcmcia_socket_list, socket_list) {
183
184 if (adj->Resource == RES_IRQ)
185 ret = adjust_irq(s, adj);
186
187 else if (s->resource_ops->add_io) {
188 unsigned long begin, end;
189
190 /* you can't use the old interface if the new
191 * one was used before */
192 spin_lock_irqsave(&s->lock, flags);
193 if ((s->resource_setup_new) &&
194 !(s->resource_setup_old)) {
195 spin_unlock_irqrestore(&s->lock, flags);
196 continue;
197 } else if (!(s->resource_setup_old))
198 s->resource_setup_old = 1;
199 spin_unlock_irqrestore(&s->lock, flags);
200
201 if (adj->Resource == RES_MEMORY_RANGE) {
202 begin = adj->resource.memory.Base;
203 end = adj->resource.memory.Base + adj->resource.memory.Size - 1;
204 if (s->resource_ops->add_mem)
205 ret = s->resource_ops->add_mem(s, adj->Action, begin, end);
206 } else if (adj->Resource == RES_IO_RANGE) {
207 begin = adj->resource.io.BasePort;
208 end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1;
209 if (s->resource_ops->add_io)
210 ret = s->resource_ops->add_io(s, adj->Action, begin, end);
211 }
212
213 if (!ret) {
214 /* as there's no way we know this is the
215 * last call to adjust_resource_info, we
216 * always need to assume this is the latest
217 * one... */
218 spin_lock_irqsave(&s->lock, flags);
219 s->resource_setup_done = 1;
220 spin_unlock_irqrestore(&s->lock, flags);
221 }
222 }
223 }
224 up_read(&pcmcia_socket_list_rwsem);
225
226 return (ret);
227}
228
229/** pccard_get_status
230 *
231 * Get the current socket state bits. We don't support the latched
232 * SocketState yet: I haven't seen any point for it.
233 */
234
235static int pccard_get_status(struct pcmcia_socket *s,
236 struct pcmcia_device *p_dev,
237 cs_status_t *status)
238{
239 config_t *c;
240 int val;
241
242 s->ops->get_status(s, &val);
243 status->CardState = status->SocketState = 0;
244 status->CardState |= (val & SS_DETECT) ? CS_EVENT_CARD_DETECT : 0;
245 status->CardState |= (val & SS_CARDBUS) ? CS_EVENT_CB_DETECT : 0;
246 status->CardState |= (val & SS_3VCARD) ? CS_EVENT_3VCARD : 0;
247 status->CardState |= (val & SS_XVCARD) ? CS_EVENT_XVCARD : 0;
248 if (s->state & SOCKET_SUSPEND)
249 status->CardState |= CS_EVENT_PM_SUSPEND;
250 if (!(s->state & SOCKET_PRESENT))
251 return CS_NO_CARD;
252
253 c = (p_dev) ? p_dev->function_config : NULL;
254
255 if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
256 (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
257 u_char reg;
258 if (c->CardValues & PRESENT_PIN_REPLACE) {
259 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
260 status->CardState |=
261 (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
262 status->CardState |=
263 (reg & PRR_READY_STATUS) ? CS_EVENT_READY_CHANGE : 0;
264 status->CardState |=
265 (reg & PRR_BVD2_STATUS) ? CS_EVENT_BATTERY_LOW : 0;
266 status->CardState |=
267 (reg & PRR_BVD1_STATUS) ? CS_EVENT_BATTERY_DEAD : 0;
268 } else {
269 /* No PRR? Then assume we're always ready */
270 status->CardState |= CS_EVENT_READY_CHANGE;
271 }
272 if (c->CardValues & PRESENT_EXT_STATUS) {
273 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
274 status->CardState |=
275 (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
276 }
277 return CS_SUCCESS;
278 }
279 status->CardState |=
280 (val & SS_WRPROT) ? CS_EVENT_WRITE_PROTECT : 0;
281 status->CardState |=
282 (val & SS_BATDEAD) ? CS_EVENT_BATTERY_DEAD : 0;
283 status->CardState |=
284 (val & SS_BATWARN) ? CS_EVENT_BATTERY_LOW : 0;
285 status->CardState |=
286 (val & SS_READY) ? CS_EVENT_READY_CHANGE : 0;
287 return CS_SUCCESS;
288} /* pccard_get_status */
289
142/*====================================================================== 290/*======================================================================
143 291
144 These manage a ring buffer of events pending for one user process 292 These manage a ring buffer of events pending for one user process
@@ -557,8 +705,6 @@ static u_int ds_poll(struct file *file, poll_table *wait)
557 705
558/*====================================================================*/ 706/*====================================================================*/
559 707
560extern int pcmcia_adjust_resource_info(adjust_t *adj);
561
562static int ds_ioctl(struct inode * inode, struct file * file, 708static int ds_ioctl(struct inode * inode, struct file * file,
563 u_int cmd, u_long arg) 709 u_int cmd, u_long arg)
564{ 710{
@@ -660,7 +806,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
660 mutex_lock(&s->skt_mutex); 806 mutex_lock(&s->skt_mutex);
661 pcmcia_validate_mem(s); 807 pcmcia_validate_mem(s);
662 mutex_unlock(&s->skt_mutex); 808 mutex_unlock(&s->skt_mutex);
663 ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo); 809 ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo.Chains);
664 break; 810 break;
665 case DS_SUSPEND_CARD: 811 case DS_SUSPEND_CARD:
666 ret = pcmcia_suspend_card(s); 812 ret = pcmcia_suspend_card(s);
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 1d128fbd1a92..4884a18cf9e6 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -21,11 +21,9 @@
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/device.h> 22#include <linux/device.h>
23 23
24#define IN_CARD_SERVICES
25#include <pcmcia/cs_types.h> 24#include <pcmcia/cs_types.h>
26#include <pcmcia/ss.h> 25#include <pcmcia/ss.h>
27#include <pcmcia/cs.h> 26#include <pcmcia/cs.h>
28#include <pcmcia/bulkmem.h>
29#include <pcmcia/cistpl.h> 27#include <pcmcia/cistpl.h>
30#include <pcmcia/cisreg.h> 28#include <pcmcia/cisreg.h>
31#include <pcmcia/ds.h> 29#include <pcmcia/ds.h>
@@ -311,74 +309,6 @@ int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle,
311EXPORT_SYMBOL(pcmcia_get_window); 309EXPORT_SYMBOL(pcmcia_get_window);
312 310
313 311
314/** pccard_get_status
315 *
316 * Get the current socket state bits. We don't support the latched
317 * SocketState yet: I haven't seen any point for it.
318 */
319
320int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev,
321 cs_status_t *status)
322{
323 config_t *c;
324 int val;
325
326 s->ops->get_status(s, &val);
327 status->CardState = status->SocketState = 0;
328 status->CardState |= (val & SS_DETECT) ? CS_EVENT_CARD_DETECT : 0;
329 status->CardState |= (val & SS_CARDBUS) ? CS_EVENT_CB_DETECT : 0;
330 status->CardState |= (val & SS_3VCARD) ? CS_EVENT_3VCARD : 0;
331 status->CardState |= (val & SS_XVCARD) ? CS_EVENT_XVCARD : 0;
332 if (s->state & SOCKET_SUSPEND)
333 status->CardState |= CS_EVENT_PM_SUSPEND;
334 if (!(s->state & SOCKET_PRESENT))
335 return CS_NO_CARD;
336
337 c = (p_dev) ? p_dev->function_config : NULL;
338
339 if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
340 (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
341 u_char reg;
342 if (c->CardValues & PRESENT_PIN_REPLACE) {
343 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
344 status->CardState |=
345 (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
346 status->CardState |=
347 (reg & PRR_READY_STATUS) ? CS_EVENT_READY_CHANGE : 0;
348 status->CardState |=
349 (reg & PRR_BVD2_STATUS) ? CS_EVENT_BATTERY_LOW : 0;
350 status->CardState |=
351 (reg & PRR_BVD1_STATUS) ? CS_EVENT_BATTERY_DEAD : 0;
352 } else {
353 /* No PRR? Then assume we're always ready */
354 status->CardState |= CS_EVENT_READY_CHANGE;
355 }
356 if (c->CardValues & PRESENT_EXT_STATUS) {
357 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
358 status->CardState |=
359 (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
360 }
361 return CS_SUCCESS;
362 }
363 status->CardState |=
364 (val & SS_WRPROT) ? CS_EVENT_WRITE_PROTECT : 0;
365 status->CardState |=
366 (val & SS_BATDEAD) ? CS_EVENT_BATTERY_DEAD : 0;
367 status->CardState |=
368 (val & SS_BATWARN) ? CS_EVENT_BATTERY_LOW : 0;
369 status->CardState |=
370 (val & SS_READY) ? CS_EVENT_READY_CHANGE : 0;
371 return CS_SUCCESS;
372} /* pccard_get_status */
373
374int pcmcia_get_status(struct pcmcia_device *p_dev, cs_status_t *status)
375{
376 return pccard_get_status(p_dev->socket, p_dev, status);
377}
378EXPORT_SYMBOL(pcmcia_get_status);
379
380
381
382/** pcmcia_get_mem_page 312/** pcmcia_get_mem_page
383 * 313 *
384 * Change the card address of an already open memory window. 314 * Change the card address of an already open memory window.
@@ -812,6 +742,15 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
812 type = IRQF_SHARED; 742 type = IRQF_SHARED;
813 743
814#ifdef CONFIG_PCMCIA_PROBE 744#ifdef CONFIG_PCMCIA_PROBE
745
746#ifdef IRQ_NOAUTOEN
747 /* if the underlying IRQ infrastructure allows for it, only allocate
748 * the IRQ, but do not enable it
749 */
750 if (!(req->Attributes & IRQ_HANDLE_PRESENT))
751 type |= IRQ_NOAUTOEN;
752#endif /* IRQ_NOAUTOEN */
753
815 if (s->irq.AssignedIRQ != 0) { 754 if (s->irq.AssignedIRQ != 0) {
816 /* If the interrupt is already assigned, it must be the same */ 755 /* If the interrupt is already assigned, it must be the same */
817 irq = s->irq.AssignedIRQ; 756 irq = s->irq.AssignedIRQ;
@@ -966,7 +905,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev) {
966 pcmcia_release_configuration(p_dev); 905 pcmcia_release_configuration(p_dev);
967 pcmcia_release_io(p_dev, &p_dev->io); 906 pcmcia_release_io(p_dev, &p_dev->io);
968 pcmcia_release_irq(p_dev, &p_dev->irq); 907 pcmcia_release_irq(p_dev, &p_dev->irq);
969 if (&p_dev->win) 908 if (p_dev->win)
970 pcmcia_release_window(p_dev->win); 909 pcmcia_release_window(p_dev->win);
971} 910}
972EXPORT_SYMBOL(pcmcia_disable_device); 911EXPORT_SYMBOL(pcmcia_disable_device);
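The new IRQ_NOAUTOEN hunk lets pcmcia_request_irq() reserve a line without arming it when the caller has not supplied a handler yet, so the card cannot raise an interrupt before the driver is ready. In more recent kernels the same effect is spelled with irq_set_status_flags() ahead of request_irq(); a hedged sketch, assuming an irq number and handler from elsewhere (claim_irq_disabled and "mydev" are illustrative):

#include <linux/interrupt.h>
#include <linux/irq.h>

static int claim_irq_disabled(unsigned int irq, irq_handler_t handler,
			      void *dev_id)
{
	int ret;

	irq_set_status_flags(irq, IRQ_NOAUTOEN);  /* don't enable on request */
	ret = request_irq(irq, handler, 0, "mydev", dev_id);
	if (ret)
		return ret;

	/* ... finish configuring the device, then arm the line: */
	enable_irq(irq);
	return 0;
}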
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 9414163c78e7..ccfdf1969a7f 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -33,7 +33,6 @@
33 33
34#include <pcmcia/cs_types.h> 34#include <pcmcia/cs_types.h>
35#include <pcmcia/ss.h> 35#include <pcmcia/ss.h>
36#include <pcmcia/bulkmem.h>
37#include <pcmcia/cistpl.h> 36#include <pcmcia/cistpl.h>
38 37
39#include "cs_internal.h" 38#include "cs_internal.h"
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index ce2226273aaa..c0e2afc79e3e 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -21,86 +21,6 @@
21#include "cs_internal.h" 21#include "cs_internal.h"
22 22
23 23
24#ifdef CONFIG_PCMCIA_IOCTL
25
26#ifdef CONFIG_PCMCIA_PROBE
27
28static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
29{
30 int irq;
31 u32 mask;
32
33 irq = adj->resource.irq.IRQ;
34 if ((irq < 0) || (irq > 15))
35 return CS_BAD_IRQ;
36
37 if (adj->Action != REMOVE_MANAGED_RESOURCE)
38 return 0;
39
40 mask = 1 << irq;
41
42 if (!(s->irq_mask & mask))
43 return 0;
44
45 s->irq_mask &= ~mask;
46
47 return 0;
48}
49
50#else
51
52static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) {
53 return CS_SUCCESS;
54}
55
56#endif
57
58
59int pcmcia_adjust_resource_info(adjust_t *adj)
60{
61 struct pcmcia_socket *s;
62 int ret = CS_UNSUPPORTED_FUNCTION;
63 unsigned long flags;
64
65 down_read(&pcmcia_socket_list_rwsem);
66 list_for_each_entry(s, &pcmcia_socket_list, socket_list) {
67
68 if (adj->Resource == RES_IRQ)
69 ret = adjust_irq(s, adj);
70
71 else if (s->resource_ops->adjust_resource) {
72
73 /* you can't use the old interface if the new
74 * one was used before */
75 spin_lock_irqsave(&s->lock, flags);
76 if ((s->resource_setup_new) &&
77 !(s->resource_setup_old)) {
78 spin_unlock_irqrestore(&s->lock, flags);
79 continue;
80 } else if (!(s->resource_setup_old))
81 s->resource_setup_old = 1;
82 spin_unlock_irqrestore(&s->lock, flags);
83
84 ret = s->resource_ops->adjust_resource(s, adj);
85 if (!ret) {
86 /* as there's no way we know this is the
87 * last call to adjust_resource_info, we
88 * always need to assume this is the latest
89 * one... */
90 spin_lock_irqsave(&s->lock, flags);
91 s->resource_setup_done = 1;
92 spin_unlock_irqrestore(&s->lock, flags);
93 }
94 }
95 }
96 up_read(&pcmcia_socket_list_rwsem);
97
98 return (ret);
99}
100EXPORT_SYMBOL(pcmcia_adjust_resource_info);
101
102#endif
103
104int pcmcia_validate_mem(struct pcmcia_socket *s) 24int pcmcia_validate_mem(struct pcmcia_socket *s)
105{ 25{
106 if (s->resource_ops->validate_mem) 26 if (s->resource_ops->validate_mem)
@@ -164,7 +84,8 @@ struct pccard_resource_ops pccard_static_ops = {
164 .adjust_io_region = NULL, 84 .adjust_io_region = NULL,
165 .find_io = NULL, 85 .find_io = NULL,
166 .find_mem = NULL, 86 .find_mem = NULL,
167 .adjust_resource = NULL, 87 .add_io = NULL,
88 .add_mem = NULL,
168 .init = static_init, 89 .init = static_init,
169 .exit = NULL, 90 .exit = NULL,
170}; 91};
@@ -264,7 +185,8 @@ struct pccard_resource_ops pccard_iodyn_ops = {
264 .adjust_io_region = iodyn_adjust_io_region, 185 .adjust_io_region = iodyn_adjust_io_region,
265 .find_io = iodyn_find_io_region, 186 .find_io = iodyn_find_io_region,
266 .find_mem = NULL, 187 .find_mem = NULL,
267 .adjust_resource = NULL, 188 .add_io = NULL,
189 .add_mem = NULL,
268 .init = static_init, 190 .init = static_init,
269 .exit = NULL, 191 .exit = NULL,
270}; 192};
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 0fcf763b9175..d0c1d63d1891 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -31,7 +31,6 @@
31#include <pcmcia/cs_types.h> 31#include <pcmcia/cs_types.h>
32#include <pcmcia/ss.h> 32#include <pcmcia/ss.h>
33#include <pcmcia/cs.h> 33#include <pcmcia/cs.h>
34#include <pcmcia/bulkmem.h>
35#include <pcmcia/cistpl.h> 34#include <pcmcia/cistpl.h>
36#include "cs_internal.h" 35#include "cs_internal.h"
37 36
@@ -261,21 +260,22 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
261======================================================================*/ 260======================================================================*/
262 261
263/* Validation function for cards with a valid CIS */ 262/* Validation function for cards with a valid CIS */
264static int readable(struct pcmcia_socket *s, struct resource *res, cisinfo_t *info) 263static int readable(struct pcmcia_socket *s, struct resource *res,
264 unsigned int *count)
265{ 265{
266 int ret = -1; 266 int ret = -1;
267 267
268 s->cis_mem.res = res; 268 s->cis_mem.res = res;
269 s->cis_virt = ioremap(res->start, s->map_size); 269 s->cis_virt = ioremap(res->start, s->map_size);
270 if (s->cis_virt) { 270 if (s->cis_virt) {
271 ret = pccard_validate_cis(s, BIND_FN_ALL, info); 271 ret = pccard_validate_cis(s, BIND_FN_ALL, count);
272 /* invalidate mapping and CIS cache */ 272 /* invalidate mapping and CIS cache */
273 iounmap(s->cis_virt); 273 iounmap(s->cis_virt);
274 s->cis_virt = NULL; 274 s->cis_virt = NULL;
275 destroy_cis_cache(s); 275 destroy_cis_cache(s);
276 } 276 }
277 s->cis_mem.res = NULL; 277 s->cis_mem.res = NULL;
278 if ((ret != 0) || (info->Chains == 0)) 278 if ((ret != 0) || (*count == 0))
279 return 0; 279 return 0;
280 return 1; 280 return 1;
281} 281}
@@ -316,7 +316,7 @@ static int
316cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size) 316cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size)
317{ 317{
318 struct resource *res1, *res2; 318 struct resource *res1, *res2;
319 cisinfo_t info1, info2; 319 unsigned int info1, info2;
320 int ret = 0; 320 int ret = 0;
321 321
322 res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe"); 322 res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe");
@@ -330,7 +330,7 @@ cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size)
330 free_region(res2); 330 free_region(res2);
331 free_region(res1); 331 free_region(res1);
332 332
333 return (ret == 2) && (info1.Chains == info2.Chains); 333 return (ret == 2) && (info1 == info2);
334} 334}
335 335
336static int 336static int
@@ -766,21 +766,6 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
766} 766}
767 767
768 768
769static int nonstatic_adjust_resource_info(struct pcmcia_socket *s, adjust_t *adj)
770{
771 unsigned long end;
772
773 switch (adj->Resource) {
774 case RES_MEMORY_RANGE:
775 end = adj->resource.memory.Base + adj->resource.memory.Size - 1;
776 return adjust_memory(s, adj->Action, adj->resource.memory.Base, end);
777 case RES_IO_RANGE:
778 end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1;
779 return adjust_io(s, adj->Action, adj->resource.io.BasePort, end);
780 }
781 return CS_UNSUPPORTED_FUNCTION;
782}
783
784#ifdef CONFIG_PCI 769#ifdef CONFIG_PCI
785static int nonstatic_autoadd_resources(struct pcmcia_socket *s) 770static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
786{ 771{
@@ -889,7 +874,8 @@ struct pccard_resource_ops pccard_nonstatic_ops = {
889 .adjust_io_region = nonstatic_adjust_io_region, 874 .adjust_io_region = nonstatic_adjust_io_region,
890 .find_io = nonstatic_find_io_region, 875 .find_io = nonstatic_find_io_region,
891 .find_mem = nonstatic_find_mem_region, 876 .find_mem = nonstatic_find_mem_region,
892 .adjust_resource = nonstatic_adjust_resource_info, 877 .add_io = adjust_io,
878 .add_mem = adjust_memory,
893 .init = nonstatic_init, 879 .init = nonstatic_init,
894 .exit = nonstatic_release_resource_db, 880 .exit = nonstatic_release_resource_db,
895}; 881};
@@ -1008,41 +994,34 @@ static ssize_t store_mem_db(struct device *dev,
1008} 994}
1009static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db); 995static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db);
1010 996
1011static struct device_attribute *pccard_rsrc_attributes[] = { 997static struct attribute *pccard_rsrc_attributes[] = {
1012 &dev_attr_available_resources_io, 998 &dev_attr_available_resources_io.attr,
1013 &dev_attr_available_resources_mem, 999 &dev_attr_available_resources_mem.attr,
1014 NULL, 1000 NULL,
1015}; 1001};
1016 1002
1003static const struct attribute_group rsrc_attributes = {
1004 .attrs = pccard_rsrc_attributes,
1005};
1006
1017static int __devinit pccard_sysfs_add_rsrc(struct device *dev, 1007static int __devinit pccard_sysfs_add_rsrc(struct device *dev,
1018 struct class_interface *class_intf) 1008 struct class_interface *class_intf)
1019{ 1009{
1020 struct pcmcia_socket *s = dev_get_drvdata(dev); 1010 struct pcmcia_socket *s = dev_get_drvdata(dev);
1021 struct device_attribute **attr; 1011
1022 int ret = 0;
1023 if (s->resource_ops != &pccard_nonstatic_ops) 1012 if (s->resource_ops != &pccard_nonstatic_ops)
1024 return 0; 1013 return 0;
1025 1014 return sysfs_create_group(&dev->kobj, &rsrc_attributes);
1026 for (attr = pccard_rsrc_attributes; *attr; attr++) {
1027 ret = device_create_file(dev, *attr);
1028 if (ret)
1029 break;
1030 }
1031
1032 return ret;
1033} 1015}
1034 1016
1035static void __devexit pccard_sysfs_remove_rsrc(struct device *dev, 1017static void __devexit pccard_sysfs_remove_rsrc(struct device *dev,
1036 struct class_interface *class_intf) 1018 struct class_interface *class_intf)
1037{ 1019{
1038 struct pcmcia_socket *s = dev_get_drvdata(dev); 1020 struct pcmcia_socket *s = dev_get_drvdata(dev);
1039 struct device_attribute **attr;
1040 1021
1041 if (s->resource_ops != &pccard_nonstatic_ops) 1022 if (s->resource_ops != &pccard_nonstatic_ops)
1042 return; 1023 return;
1043 1024 sysfs_remove_group(&dev->kobj, &rsrc_attributes);
1044 for (attr = pccard_rsrc_attributes; *attr; attr++)
1045 device_remove_file(dev, *attr);
1046} 1025}
1047 1026
1048static struct class_interface pccard_rsrc_interface __refdata = { 1027static struct class_interface pccard_rsrc_interface __refdata = {
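The sysfs conversion at the end of rsrc_nonstatic.c swaps a hand-rolled device_create_file() loop, which left earlier files behind when a later one failed, for a static attribute_group: sysfs_create_group() creates the whole set atomically and sysfs_remove_group() tears it down in one call. A minimal sketch with an illustrative attribute (foo):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "42\n");	/* placeholder payload */
}
static DEVICE_ATTR(foo, 0444, foo_show, NULL);

static struct attribute *mydev_attrs[] = {
	&dev_attr_foo.attr,
	NULL,				/* the array must be NULL-terminated */
};

static const struct attribute_group mydev_group = {
	.attrs = mydev_attrs,
};

/* probe:  ret = sysfs_create_group(&dev->kobj, &mydev_group);
 * remove: sysfs_remove_group(&dev->kobj, &mydev_group);        */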
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 1edc1da9d353..91ef6a0da3ab 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -14,7 +14,6 @@
14#include <pcmcia/cs_types.h> 14#include <pcmcia/cs_types.h>
15#include <pcmcia/cs.h> 15#include <pcmcia/cs.h>
16#include <pcmcia/ss.h> 16#include <pcmcia/ss.h>
17#include <pcmcia/bulkmem.h>
18#include <pcmcia/cistpl.h> 17#include <pcmcia/cistpl.h>
19#include "cs_internal.h" 18#include "cs_internal.h"
20 19
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 562384d6f321..006a29e91d83 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -27,11 +27,9 @@
27#include <asm/system.h> 27#include <asm/system.h>
28#include <asm/irq.h> 28#include <asm/irq.h>
29 29
30#define IN_CARD_SERVICES
31#include <pcmcia/cs_types.h> 30#include <pcmcia/cs_types.h>
32#include <pcmcia/ss.h> 31#include <pcmcia/ss.h>
33#include <pcmcia/cs.h> 32#include <pcmcia/cs.h>
34#include <pcmcia/bulkmem.h>
35#include <pcmcia/cistpl.h> 33#include <pcmcia/cistpl.h>
36#include <pcmcia/cisreg.h> 34#include <pcmcia/cisreg.h>
37#include <pcmcia/ds.h> 35#include <pcmcia/ds.h>
@@ -293,7 +291,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
293 count = 0; 291 count = 0;
294 else { 292 else {
295 struct pcmcia_socket *s; 293 struct pcmcia_socket *s;
296 cisinfo_t cisinfo; 294 unsigned int chains;
297 295
298 if (off + count > size) 296 if (off + count > size)
299 count = size - off; 297 count = size - off;
@@ -302,9 +300,9 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
302 300
303 if (!(s->state & SOCKET_PRESENT)) 301 if (!(s->state & SOCKET_PRESENT))
304 return -ENODEV; 302 return -ENODEV;
305 if (pccard_validate_cis(s, BIND_FN_ALL, &cisinfo)) 303 if (pccard_validate_cis(s, BIND_FN_ALL, &chains))
306 return -EIO; 304 return -EIO;
307 if (!cisinfo.Chains) 305 if (!chains)
308 return -ENODATA; 306 return -ENODATA;
309 307
310 count = pccard_extract_cis(s, buf, off, count); 308 count = pccard_extract_cis(s, buf, off, count);
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index d29657bf1b40..129db7bd06c3 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -155,7 +155,7 @@
155#define ENE_TEST_C9_TLTENABLE 0x02 155#define ENE_TEST_C9_TLTENABLE 0x02
156#define ENE_TEST_C9_PFENABLE_F0 0x04 156#define ENE_TEST_C9_PFENABLE_F0 0x04
157#define ENE_TEST_C9_PFENABLE_F1 0x08 157#define ENE_TEST_C9_PFENABLE_F1 0x08
158#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F0) 158#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F1)
159#define ENE_TEST_C9_WPDISALBLE_F0 0x40 159#define ENE_TEST_C9_WPDISALBLE_F0 0x40
160#define ENE_TEST_C9_WPDISALBLE_F1 0x80 160#define ENE_TEST_C9_WPDISALBLE_F1 0x80
161#define ENE_TEST_C9_WPDISALBLE (ENE_TEST_C9_WPDISALBLE_F0 | ENE_TEST_C9_WPDISALBLE_F1) 161#define ENE_TEST_C9_WPDISALBLE (ENE_TEST_C9_WPDISALBLE_F0 | ENE_TEST_C9_WPDISALBLE_F1)
@@ -692,7 +692,7 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
692 goto out; 692 goto out;
693 693
694 /* check state */ 694 /* check state */
695 yenta_get_status(&socket->socket, &state); 695 yenta_get_status(&slot2->socket, &state);
696 if (state & SS_DETECT) { 696 if (state & SS_DETECT) {
697 ret = 0; 697 ret = 0;
698 goto out; 698 goto out;
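
The first ti113x.h hunk fixes a copy-and-paste mask bug: OR-ing ENE_TEST_C9_PFENABLE_F0 with itself leaves the combined macro equal to the F0 bit alone, so function 1 was never enabled. The second hunk points the status query at slot2 rather than the socket being probed. The arithmetic behind the first fix, with illustrative names:

#define DEMO_ENABLE_F0	0x04
#define DEMO_ENABLE_F1	0x08

/* old, buggy shape: (F0 | F0) == 0x04, the F1 bit silently left out
 * fixed shape:      (F0 | F1) == 0x0c, both functions enabled */
#define DEMO_ENABLE	(DEMO_ENABLE_F0 | DEMO_ENABLE_F1)
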
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index 2ef8cdfda4a7..90b9a6503e15 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -265,6 +265,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
265 } 265 }
266 266
267 platform_set_drvdata(pdev, rtc); 267 platform_set_drvdata(pdev, rtc);
268 device_init_wakeup(&pdev->dev, 1);
268 269
269 dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n", 270 dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
270 (unsigned long)rtc->regs, rtc->irq); 271 (unsigned long)rtc->regs, rtc->irq);
@@ -284,6 +285,8 @@ static int __exit at32_rtc_remove(struct platform_device *pdev)
284{ 285{
285 struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev); 286 struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev);
286 287
288 device_init_wakeup(&pdev->dev, 0);
289
287 free_irq(rtc->irq, rtc); 290 free_irq(rtc->irq, rtc);
288 iounmap(rtc->regs); 291 iounmap(rtc->regs);
289 rtc_device_unregister(rtc->rtc); 292 rtc_device_unregister(rtc->rtc);
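
The rtc-at32ap700x.c hunks pair device_init_wakeup(..., 1) in probe with device_init_wakeup(..., 0) in remove, marking the RTC as wakeup-capable for its lifetime. The pattern in isolation, with the rest of probe/remove elided (a sketch, not the full driver):

#include <linux/platform_device.h>

static int __init demo_rtc_probe(struct platform_device *pdev)
{
	/* ... map registers, request the irq, register the rtc ... */
	device_init_wakeup(&pdev->dev, 1);	/* device may wake the system */
	return 0;
}

static int __exit demo_rtc_remove(struct platform_device *pdev)
{
	device_init_wakeup(&pdev->dev, 0);	/* clear before teardown */
	/* ... free the irq, unmap, unregister the rtc ... */
	return 0;
}
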
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1a4025683362..1b6c52ef7339 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
995 now = get_clock(); 995 now = get_clock();
996 996
997 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", 997 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
998 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), 998 cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
999 (unsigned int) intparm); 999 irb->scsw.cmd.dstat), (unsigned int) intparm);
1000 1000
1001 /* check for unsolicited interrupts */ 1001 /* check for unsolicited interrupts */
1002 cqr = (struct dasd_ccw_req *) intparm; 1002 cqr = (struct dasd_ccw_req *) intparm;
1003 if (!cqr || ((irb->scsw.cc == 1) && 1003 if (!cqr || ((irb->scsw.cmd.cc == 1) &&
1004 (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && 1004 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
1005 (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) { 1005 (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
1006 if (cqr && cqr->status == DASD_CQR_IN_IO) 1006 if (cqr && cqr->status == DASD_CQR_IN_IO)
1007 cqr->status = DASD_CQR_QUEUED; 1007 cqr->status = DASD_CQR_QUEUED;
1008 device = dasd_device_from_cdev_locked(cdev); 1008 device = dasd_device_from_cdev_locked(cdev);
@@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1025 1025
1026 /* Check for clear pending */ 1026 /* Check for clear pending */
1027 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1027 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1028 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1028 irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
1029 cqr->status = DASD_CQR_CLEARED; 1029 cqr->status = DASD_CQR_CLEARED;
1030 dasd_device_clear_timer(device); 1030 dasd_device_clear_timer(device);
1031 wake_up(&dasd_flush_wq); 1031 wake_up(&dasd_flush_wq);
@@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1041 return; 1041 return;
1042 } 1042 }
1043 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", 1043 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
1044 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); 1044 ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
1045 next = NULL; 1045 next = NULL;
1046 expires = 0; 1046 expires = 0;
1047 if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1047 if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1048 irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) { 1048 irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
1049 /* request was completed successfully */ 1049 /* request was completed successfully */
1050 cqr->status = DASD_CQR_SUCCESS; 1050 cqr->status = DASD_CQR_SUCCESS;
1051 cqr->stopclk = now; 1051 cqr->stopclk = now;
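
This dasd.c hunk, and the s390 hunks that follow it, mechanically rewrite irb->scsw.<field> as irb->scsw.cmd.<field>: the subchannel-status word became a union with separate command-mode and transport-mode layouts, and these drivers use the command-mode view. A heavily simplified sketch of that shape; the field widths and ordering here are illustrative, not the real s390 header definition.

struct demo_cmd_scsw {			/* command-mode layout (subset) */
	unsigned int fctl:3;		/* function control */
	unsigned int stctl:5;		/* status control */
	unsigned int cc:2;		/* condition code */
	unsigned int cpa;		/* channel program address */
	unsigned int dstat:8;		/* device status */
	unsigned int cstat:8;		/* subchannel status */
	unsigned int count:16;		/* residual count */
};

union demo_scsw {
	struct demo_cmd_scsw cmd;	/* irb->scsw.cmd.* in the hunks */
	/* transport-mode layout omitted from this sketch */
};
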
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e6700df52df4..5c6e6f331cb0 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1572 1572
1573 /* determine the address of the CCW to be restarted */ 1573 /* determine the address of the CCW to be restarted */
1574 /* Imprecise ending is not set -> addr from IRB-SCSW */ 1574 /* Imprecise ending is not set -> addr from IRB-SCSW */
1575 cpa = default_erp->refers->irb.scsw.cpa; 1575 cpa = default_erp->refers->irb.scsw.cmd.cpa;
1576 1576
1577 if (cpa == 0) { 1577 if (cpa == 0) {
1578 1578
@@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1725 1725
1726 /* determine the address of the CCW to be restarted */ 1726 /* determine the address of the CCW to be restarted */
1727 /* Imprecise ending is not set -> addr from IRB-SCSW */ 1727 /* Imprecise ending is not set -> addr from IRB-SCSW */
1728 cpa = previous_erp->irb.scsw.cpa; 1728 cpa = previous_erp->irb.scsw.cmd.cpa;
1729 1729
1730 if (cpa == 0) { 1730 if (cpa == 0) {
1731 1731
@@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
2171{ 2171{
2172 struct dasd_device *device = erp->startdev; 2172 struct dasd_device *device = erp->startdev;
2173 2173
2174 if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK 2174 if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK
2175 | SCHN_STAT_CHN_CTRL_CHK)) { 2175 | SCHN_STAT_CHN_CTRL_CHK)) {
2176 DEV_MESSAGE(KERN_DEBUG, device, "%s", 2176 DEV_MESSAGE(KERN_DEBUG, device, "%s",
2177 "channel or interface control check"); 2177 "channel or interface control check");
@@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
2352 2352
2353 if ((cqr1->irb.esw.esw0.erw.cons == 0) && 2353 if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
2354 (cqr2->irb.esw.esw0.erw.cons == 0)) { 2354 (cqr2->irb.esw.esw0.erw.cons == 0)) {
2355 if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2355 if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
2356 SCHN_STAT_CHN_CTRL_CHK)) == 2356 SCHN_STAT_CHN_CTRL_CHK)) ==
2357 (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2357 (cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
2358 SCHN_STAT_CHN_CTRL_CHK))) 2358 SCHN_STAT_CHN_CTRL_CHK)))
2359 return 1; /* match with ifcc*/ 2359 return 1; /* match with ifcc*/
2360 } 2360 }
@@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2622 } 2622 }
2623 2623
2624 /* double-check if current erp/cqr was successful */ 2624 /* double-check if current erp/cqr was successful */
2625 if ((cqr->irb.scsw.cstat == 0x00) && 2625 if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
2626 (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { 2626 (cqr->irb.scsw.cmd.dstat ==
2627 (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
2627 2628
2628 DEV_MESSAGE(KERN_DEBUG, device, 2629 DEV_MESSAGE(KERN_DEBUG, device,
2629 "ERP called for successful request %p" 2630 "ERP called for successful request %p"
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a0edae091b5e..e0b77210d37a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1404,13 +1404,14 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1404 1404
1405 /* first of all check for state change pending interrupt */ 1405 /* first of all check for state change pending interrupt */
1406 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 1406 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1407 if ((irb->scsw.dstat & mask) == mask) { 1407 if ((irb->scsw.cmd.dstat & mask) == mask) {
1408 dasd_generic_handle_state_change(device); 1408 dasd_generic_handle_state_change(device);
1409 return; 1409 return;
1410 } 1410 }
1411 1411
1412 /* summary unit check */ 1412 /* summary unit check */
1413 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) { 1413 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
1414 (irb->ecw[7] == 0x0D)) {
1414 dasd_alias_handle_summary_unit_check(device, irb); 1415 dasd_alias_handle_summary_unit_check(device, irb);
1415 return; 1416 return;
1416 } 1417 }
@@ -2068,11 +2069,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
2068 device->cdev->dev.bus_id); 2069 device->cdev->dev.bus_id);
2069 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2070 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2070 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 2071 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
2071 irb->scsw.cstat, irb->scsw.dstat); 2072 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
2072 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2073 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2073 " device %s: Failing CCW: %p\n", 2074 " device %s: Failing CCW: %p\n",
2074 device->cdev->dev.bus_id, 2075 device->cdev->dev.bus_id,
2075 (void *) (addr_t) irb->scsw.cpa); 2076 (void *) (addr_t) irb->scsw.cmd.cpa);
2076 if (irb->esw.esw0.erw.cons) { 2077 if (irb->esw.esw0.erw.cons) {
2077 for (sl = 0; sl < 4; sl++) { 2078 for (sl = 0; sl < 4; sl++) {
2078 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2079 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -2122,7 +2123,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
2122 /* scsw->cda is either valid or zero */ 2123 /* scsw->cda is either valid or zero */
2123 len = 0; 2124 len = 0;
2124 from = ++to; 2125 from = ++to;
2125 fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ 2126 fail = (struct ccw1 *)(addr_t)
2127 irb->scsw.cmd.cpa; /* failing CCW */
2126 if (from < fail - 2) { 2128 if (from < fail - 2) {
2127 from = fail - 2; /* there is a gap - print header */ 2129 from = fail - 2; /* there is a gap - print header */
2128 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); 2130 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 116611583df8..aee4656127f7 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -222,7 +222,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
222 222
223 /* first of all check for state change pending interrupt */ 223 /* first of all check for state change pending interrupt */
224 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 224 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
225 if ((irb->scsw.dstat & mask) == mask) { 225 if ((irb->scsw.cmd.dstat & mask) == mask) {
226 dasd_generic_handle_state_change(device); 226 dasd_generic_handle_state_change(device);
227 return; 227 return;
228 } 228 }
@@ -449,11 +449,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
449 device->cdev->dev.bus_id); 449 device->cdev->dev.bus_id);
450 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 450 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
451 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 451 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
452 irb->scsw.cstat, irb->scsw.dstat); 452 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
453 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 453 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
454 " device %s: Failing CCW: %p\n", 454 " device %s: Failing CCW: %p\n",
455 device->cdev->dev.bus_id, 455 device->cdev->dev.bus_id,
456 (void *) (addr_t) irb->scsw.cpa); 456 (void *) (addr_t) irb->scsw.cmd.cpa);
457 if (irb->esw.esw0.erw.cons) { 457 if (irb->esw.esw0.erw.cons) {
458 for (sl = 0; sl < 4; sl++) { 458 for (sl = 0; sl < 4; sl++) {
459 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 459 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -498,11 +498,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
498 498
499 /* print failing CCW area */ 499 /* print failing CCW area */
500 len = 0; 500 len = 0;
501 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) { 501 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
502 act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2; 502 act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
503 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 503 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
504 } 504 }
505 end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last); 505 end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
506 while (act <= end) { 506 while (act <= end) {
507 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 507 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
508 " CCW %p: %08X %08X DAT:", 508 " CCW %p: %08X %08X DAT:",
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index bb52d2fbac18..01fcdd91b846 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
167 struct dcssblk_dev_info *dev_info; 167 struct dcssblk_dev_info *dev_info;
168 int rc; 168 int rc;
169 169
170 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { 170 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
171 PRINT_WARN("Invalid value, must be 0 or 1\n");
172 return -EINVAL; 171 return -EINVAL;
173 }
174 down_write(&dcssblk_devices_sem); 172 down_write(&dcssblk_devices_sem);
175 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 173 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
176 if (atomic_read(&dev_info->use_count)) { 174 if (atomic_read(&dev_info->use_count)) {
@@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
215 set_disk_ro(dev_info->gd, 0); 213 set_disk_ro(dev_info->gd, 0);
216 } 214 }
217 } else { 215 } else {
218 PRINT_WARN("Invalid value, must be 0 or 1\n");
219 rc = -EINVAL; 216 rc = -EINVAL;
220 goto out; 217 goto out;
221 } 218 }
@@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
258{ 255{
259 struct dcssblk_dev_info *dev_info; 256 struct dcssblk_dev_info *dev_info;
260 257
261 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { 258 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
262 PRINT_WARN("Invalid value, must be 0 or 1\n");
263 return -EINVAL; 259 return -EINVAL;
264 }
265 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 260 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
266 261
267 down_write(&dcssblk_devices_sem); 262 down_write(&dcssblk_devices_sem);
@@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
289 } 284 }
290 } else { 285 } else {
291 up_write(&dcssblk_devices_sem); 286 up_write(&dcssblk_devices_sem);
292 PRINT_WARN("Invalid value, must be 0 or 1\n");
293 return -EINVAL; 287 return -EINVAL;
294 } 288 }
295 up_write(&dcssblk_devices_sem); 289 up_write(&dcssblk_devices_sem);
@@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
441 goto out; 435 goto out;
442 436
443unregister_dev: 437unregister_dev:
444 PRINT_ERR("device_create_file() failed!\n");
445 list_del(&dev_info->lh); 438 list_del(&dev_info->lh);
446 blk_cleanup_queue(dev_info->dcssblk_queue); 439 blk_cleanup_queue(dev_info->dcssblk_queue);
447 dev_info->gd->queue = NULL; 440 dev_info->gd->queue = NULL;
@@ -702,10 +695,8 @@ dcssblk_check_params(void)
702static void __exit 695static void __exit
703dcssblk_exit(void) 696dcssblk_exit(void)
704{ 697{
705 PRINT_DEBUG("DCSSBLOCK EXIT...\n");
706 s390_root_dev_unregister(dcssblk_root_dev); 698 s390_root_dev_unregister(dcssblk_root_dev);
707 unregister_blkdev(dcssblk_major, DCSSBLK_NAME); 699 unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
708 PRINT_DEBUG("...finished!\n");
709} 700}
710 701
711static int __init 702static int __init
@@ -713,27 +704,21 @@ dcssblk_init(void)
713{ 704{
714 int rc; 705 int rc;
715 706
716 PRINT_DEBUG("DCSSBLOCK INIT...\n");
717 dcssblk_root_dev = s390_root_dev_register("dcssblk"); 707 dcssblk_root_dev = s390_root_dev_register("dcssblk");
718 if (IS_ERR(dcssblk_root_dev)) { 708 if (IS_ERR(dcssblk_root_dev))
719 PRINT_ERR("device_register() failed!\n");
720 return PTR_ERR(dcssblk_root_dev); 709 return PTR_ERR(dcssblk_root_dev);
721 }
722 rc = device_create_file(dcssblk_root_dev, &dev_attr_add); 710 rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
723 if (rc) { 711 if (rc) {
724 PRINT_ERR("device_create_file(add) failed!\n");
725 s390_root_dev_unregister(dcssblk_root_dev); 712 s390_root_dev_unregister(dcssblk_root_dev);
726 return rc; 713 return rc;
727 } 714 }
728 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); 715 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
729 if (rc) { 716 if (rc) {
730 PRINT_ERR("device_create_file(remove) failed!\n");
731 s390_root_dev_unregister(dcssblk_root_dev); 717 s390_root_dev_unregister(dcssblk_root_dev);
732 return rc; 718 return rc;
733 } 719 }
734 rc = register_blkdev(0, DCSSBLK_NAME); 720 rc = register_blkdev(0, DCSSBLK_NAME);
735 if (rc < 0) { 721 if (rc < 0) {
736 PRINT_ERR("Can't get dynamic major!\n");
737 s390_root_dev_unregister(dcssblk_root_dev); 722 s390_root_dev_unregister(dcssblk_root_dev);
738 return rc; 723 return rc;
739 } 724 }
@@ -742,7 +727,6 @@ dcssblk_init(void)
742 727
743 dcssblk_check_params(); 728 dcssblk_check_params();
744 729
745 PRINT_DEBUG("...finished!\n");
746 return 0; 730 return 0;
747} 731}
748 732
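
The dcssblk hunks drop the PRINT_* chatter and let the store handlers fail silently with -EINVAL, which is the usual sysfs convention: the errno reaching the writer is the diagnostic. The validation shape the handlers keep, reduced to a sketch with an invented name:

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	/* accept "0" or "1", optionally newline-terminated, nothing else */
	if ((count > 1) && (buf[1] != '\n') && (buf[1] != '\0'))
		return -EINVAL;
	if (buf[0] != '0' && buf[0] != '1')
		return -EINVAL;
	/* ... apply the setting under the appropriate lock ... */
	return count;
}
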
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index f231bc21b1ca..dd9b986389a2 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
100 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); 100 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
101 if (cc == 3) 101 if (cc == 3)
102 return -ENXIO; 102 return -ENXIO;
103 if (cc == 2) { 103 if (cc == 2)
104 PRINT_ERR("expanded storage lost!\n");
105 return -ENXIO; 104 return -ENXIO;
106 } 105 if (cc == 1)
107 if (cc == 1) {
108 PRINT_ERR("page in failed for page index %u.\n",
109 xpage_index);
110 return -EIO; 106 return -EIO;
111 }
112 return 0; 107 return 0;
113} 108}
114 109
@@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
135 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); 130 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
136 if (cc == 3) 131 if (cc == 3)
137 return -ENXIO; 132 return -ENXIO;
138 if (cc == 2) { 133 if (cc == 2)
139 PRINT_ERR("expanded storage lost!\n");
140 return -ENXIO; 134 return -ENXIO;
141 } 135 if (cc == 1)
142 if (cc == 1) {
143 PRINT_ERR("page out failed for page index %u.\n",
144 xpage_index);
145 return -EIO; 136 return -EIO;
146 }
147 return 0; 137 return 0;
148} 138}
149 139
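
The xpram hunks keep the same condition-code handling while deleting the messages, so the flattened if-chain reads as a straight cc-to-errno mapping. The same mapping written out as a helper (illustrative only):

#include <linux/errno.h>

static int demo_cc_to_errno(int cc)
{
	switch (cc) {
	case 0:
		return 0;	/* page transferred */
	case 1:
		return -EIO;	/* transfer failed */
	case 2:
		return -ENXIO;	/* expanded storage lost */
	default:
		return -ENXIO;	/* cc == 3: no expanded storage */
	}
}
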
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 3e5653c92f4b..d3ec9b55ab35 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -93,9 +93,6 @@ struct raw3215_info {
93 struct raw3215_req *queued_write;/* pointer to queued write requests */ 93 struct raw3215_req *queued_write;/* pointer to queued write requests */
94 wait_queue_head_t empty_wait; /* wait queue for flushing */ 94 wait_queue_head_t empty_wait; /* wait queue for flushing */
95 struct timer_list timer; /* timer for delayed output */ 95 struct timer_list timer; /* timer for delayed output */
96 char *message; /* pending message from raw3215_irq */
97 int msg_dstat; /* dstat for pending message */
98 int msg_cstat; /* cstat for pending message */
99 int line_pos; /* position on the line (for tabs) */ 96 int line_pos; /* position on the line (for tabs) */
100 char ubuffer[80]; /* copy_from_user buffer */ 97 char ubuffer[80]; /* copy_from_user buffer */
101}; 98};
@@ -359,11 +356,6 @@ raw3215_tasklet(void *data)
359 raw3215_mk_write_req(raw); 356 raw3215_mk_write_req(raw);
360 raw3215_try_io(raw); 357 raw3215_try_io(raw);
361 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 358 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
362 /* Check for pending message from raw3215_irq */
363 if (raw->message != NULL) {
364 printk(raw->message, raw->msg_dstat, raw->msg_cstat);
365 raw->message = NULL;
366 }
367 tty = raw->tty; 359 tty = raw->tty;
368 if (tty != NULL && 360 if (tty != NULL &&
369 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) { 361 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
@@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
381 struct raw3215_req *req; 373 struct raw3215_req *req;
382 struct tty_struct *tty; 374 struct tty_struct *tty;
383 int cstat, dstat; 375 int cstat, dstat;
384 int count, slen; 376 int count;
385 377
386 raw = cdev->dev.driver_data; 378 raw = cdev->dev.driver_data;
387 req = (struct raw3215_req *) intparm; 379 req = (struct raw3215_req *) intparm;
388 cstat = irb->scsw.cstat; 380 cstat = irb->scsw.cmd.cstat;
389 dstat = irb->scsw.dstat; 381 dstat = irb->scsw.cmd.dstat;
390 if (cstat != 0) { 382 if (cstat != 0)
391 raw->message = KERN_WARNING
392 "Got nonzero channel status in raw3215_irq "
393 "(dev sts 0x%2x, sch sts 0x%2x)";
394 raw->msg_dstat = dstat;
395 raw->msg_cstat = cstat;
396 tasklet_schedule(&raw->tasklet); 383 tasklet_schedule(&raw->tasklet);
397 }
398 if (dstat & 0x01) { /* we got a unit exception */ 384 if (dstat & 0x01) { /* we got a unit exception */
399 dstat &= ~0x01; /* we can ignore it */ 385 dstat &= ~0x01; /* we can ignore it */
400 } 386 }
@@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
404 break; 390 break;
405 /* Attention interrupt, someone hit the enter key */ 391 /* Attention interrupt, someone hit the enter key */
406 raw3215_mk_read_req(raw); 392 raw3215_mk_read_req(raw);
407 if (MACHINE_IS_P390)
408 memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
409 tasklet_schedule(&raw->tasklet); 393 tasklet_schedule(&raw->tasklet);
410 break; 394 break;
411 case 0x08: 395 case 0x08:
@@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
415 return; /* That shouldn't happen ... */ 399 return; /* That shouldn't happen ... */
416 if (req->type == RAW3215_READ) { 400 if (req->type == RAW3215_READ) {
417 /* store residual count, then wait for device end */ 401 /* store residual count, then wait for device end */
418 req->residual = irb->scsw.count; 402 req->residual = irb->scsw.cmd.count;
419 } 403 }
420 if (dstat == 0x08) 404 if (dstat == 0x08)
421 break; 405 break;
@@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
428 412
429 tty = raw->tty; 413 tty = raw->tty;
430 count = 160 - req->residual; 414 count = 160 - req->residual;
431 if (MACHINE_IS_P390) {
432 slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
433 if (count > slen)
434 count = slen;
435 } else
436 EBCASC(raw->inbuf, count); 415 EBCASC(raw->inbuf, count);
437 cchar = ctrlchar_handle(raw->inbuf, count, tty); 416 cchar = ctrlchar_handle(raw->inbuf, count, tty);
438 switch (cchar & CTRLCHAR_MASK) { 417 switch (cchar & CTRLCHAR_MASK) {
@@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
481 raw->flags &= ~RAW3215_WORKING; 460 raw->flags &= ~RAW3215_WORKING;
482 raw3215_free_req(req); 461 raw3215_free_req(req);
483 } 462 }
484 raw->message = KERN_WARNING
485 "Spurious interrupt in in raw3215_irq "
486 "(dev sts 0x%2x, sch sts 0x%2x)";
487 raw->msg_dstat = dstat;
488 raw->msg_cstat = cstat;
489 tasklet_schedule(&raw->tasklet); 463 tasklet_schedule(&raw->tasklet);
490 } 464 }
491 return; 465 return;
@@ -883,7 +857,6 @@ con3215_init(void)
883 free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); 857 free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
884 free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); 858 free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
885 raw3215[0] = NULL; 859 raw3215[0] = NULL;
886 printk("Couldn't find a 3215 console device\n");
887 return -ENODEV; 860 return -ENODEV;
888 } 861 }
889 register_console(&con3215); 862 register_console(&con3215);
@@ -1157,7 +1130,6 @@ tty3215_init(void)
1157 tty_set_operations(driver, &tty3215_ops); 1130 tty_set_operations(driver, &tty3215_ops);
1158 ret = tty_register_driver(driver); 1131 ret = tty_register_driver(driver);
1159 if (ret) { 1132 if (ret) {
1160 printk("Couldn't register tty3215 driver\n");
1161 put_tty_driver(driver); 1133 put_tty_driver(driver);
1162 return ret; 1134 return ret;
1163 } 1135 }
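
The deleted raw->message machinery in con3215.c existed so the warning printk never ran inside raw3215_irq, where output to a 3215 console could recurse into the driver; the text was parked in the device structure and emitted later from the tasklet. With the messages removed, the fields can go too. A sketch of that defer-to-tasklet pattern, names invented:

#include <linux/interrupt.h>

struct demo_info {
	struct tasklet_struct tasklet;
	int pending_dstat;		/* stashed for later reporting */
	int pending_cstat;
};

static void demo_irq_path(struct demo_info *info, int dstat, int cstat)
{
	/* no printk here: just record the state and defer */
	info->pending_dstat = dstat;
	info->pending_cstat = cstat;
	tasklet_schedule(&info->tasklet);
}
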
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 0b040557db02..3c07974886ed 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -411,15 +411,15 @@ static int
411con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) 411con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
412{ 412{
413 /* Handle ATTN. Schedule tasklet to read aid. */ 413 /* Handle ATTN. Schedule tasklet to read aid. */
414 if (irb->scsw.dstat & DEV_STAT_ATTENTION) 414 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
415 con3270_issue_read(cp); 415 con3270_issue_read(cp);
416 416
417 if (rq) { 417 if (rq) {
418 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 418 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
419 rq->rc = -EIO; 419 rq->rc = -EIO;
420 else 420 else
421 /* Normal end. Copy residual count. */ 421 /* Normal end. Copy residual count. */
422 rq->rescnt = irb->scsw.count; 422 rq->rescnt = irb->scsw.cmd.count;
423 } 423 }
424 return RAW3270_IO_DONE; 424 return RAW3270_IO_DONE;
425} 425}
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index b5ecd59676c4..d18e6d2e0b49 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -217,17 +217,17 @@ static int
217fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) 217fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
218{ 218{
219 /* Handle ATTN. Set indication and wake waiters for attention. */ 219 /* Handle ATTN. Set indication and wake waiters for attention. */
220 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 220 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
221 fp->attention = 1; 221 fp->attention = 1;
222 wake_up(&fp->wait); 222 wake_up(&fp->wait);
223 } 223 }
224 224
225 if (rq) { 225 if (rq) {
226 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 226 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
227 rq->rc = -EIO; 227 rq->rc = -EIO;
228 else 228 else
229 /* Normal end. Copy residual count. */ 229 /* Normal end. Copy residual count. */
230 rq->rescnt = irb->scsw.count; 230 rq->rescnt = irb->scsw.cmd.count;
231 } 231 }
232 return RAW3270_IO_DONE; 232 return RAW3270_IO_DONE;
233} 233}
@@ -521,11 +521,8 @@ fs3270_init(void)
521 int rc; 521 int rc;
522 522
523 rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); 523 rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
524 if (rc) { 524 if (rc)
525 printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
526 IBM_FS3270_MAJOR, rc);
527 return rc; 525 return rc;
528 }
529 return 0; 526 return 0;
530} 527}
531 528
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index dc4710e64e0c..35fd8dfcaaa6 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -3,9 +3,8 @@
3 * 3 *
4 * Character device driver for reading z/VM *MONITOR service records. 4 * Character device driver for reading z/VM *MONITOR service records.
5 * 5 *
6 * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. 6 * Copyright IBM Corp. 2004, 2008
7 * 7 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
8 * Author: Gerald Schaefer <geraldsc@de.ibm.com>
9 */ 8 */
10 9
11#include <linux/module.h> 10#include <linux/module.h>
@@ -19,12 +18,11 @@
19#include <linux/ctype.h> 18#include <linux/ctype.h>
20#include <linux/spinlock.h> 19#include <linux/spinlock.h>
21#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/poll.h>
22#include <net/iucv/iucv.h>
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
23#include <asm/ebcdic.h> 24#include <asm/ebcdic.h>
24#include <asm/extmem.h> 25#include <asm/extmem.h>
25#include <linux/poll.h>
26#include <net/iucv/iucv.h>
27
28 26
29//#define MON_DEBUG /* Debug messages on/off */ 27//#define MON_DEBUG /* Debug messages on/off */
30 28
@@ -153,10 +151,7 @@ static int mon_check_mca(struct mon_msg *monmsg)
153 (mon_mca_end(monmsg) > mon_dcss_end) || 151 (mon_mca_end(monmsg) > mon_dcss_end) ||
154 (mon_mca_start(monmsg) < mon_dcss_start) || 152 (mon_mca_start(monmsg) < mon_dcss_start) ||
155 ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0))) 153 ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
156 {
157 P_DEBUG("READ, IGNORED INVALID MCA\n\n");
158 return -EINVAL; 154 return -EINVAL;
159 }
160 return 0; 155 return 0;
161} 156}
162 157
@@ -165,10 +160,6 @@ static int mon_send_reply(struct mon_msg *monmsg,
165{ 160{
166 int rc; 161 int rc;
167 162
168 P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
169 "0x%08X\n\n",
170 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
171
172 rc = iucv_message_reply(monpriv->path, &monmsg->msg, 163 rc = iucv_message_reply(monpriv->path, &monmsg->msg,
173 IUCV_IPRMDATA, NULL, 0); 164 IUCV_IPRMDATA, NULL, 0);
174 atomic_dec(&monpriv->msglim_count); 165 atomic_dec(&monpriv->msglim_count);
@@ -203,15 +194,12 @@ static struct mon_private *mon_alloc_mem(void)
203 struct mon_private *monpriv; 194 struct mon_private *monpriv;
204 195
205 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); 196 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
206 if (!monpriv) { 197 if (!monpriv)
207 P_ERROR("no memory for monpriv\n");
208 return NULL; 198 return NULL;
209 }
210 for (i = 0; i < MON_MSGLIM; i++) { 199 for (i = 0; i < MON_MSGLIM; i++) {
211 monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg), 200 monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
212 GFP_KERNEL); 201 GFP_KERNEL);
213 if (!monpriv->msg_array[i]) { 202 if (!monpriv->msg_array[i]) {
214 P_ERROR("open, no memory for msg_array\n");
215 mon_free_mem(monpriv); 203 mon_free_mem(monpriv);
216 return NULL; 204 return NULL;
217 } 205 }
@@ -219,41 +207,10 @@ static struct mon_private *mon_alloc_mem(void)
219 return monpriv; 207 return monpriv;
220} 208}
221 209
222static inline void mon_read_debug(struct mon_msg *monmsg,
223 struct mon_private *monpriv)
224{
225#ifdef MON_DEBUG
226 u8 msg_type[2], mca_type;
227 unsigned long records_len;
228
229 records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
230
231 memcpy(msg_type, &monmsg->msg.class, 2);
232 EBCASC(msg_type, 2);
233 mca_type = mon_mca_type(monmsg, 0);
234 EBCASC(&mca_type, 1);
235
236 P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
237 monpriv->read_index, monpriv->write_index);
238 P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
239 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
240 P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
241 msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
242 mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
243 P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
244 mon_mca_start(monmsg), mon_mca_end(monmsg));
245 P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
246 mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
247 if (mon_mca_size(monmsg) > 12)
248 P_DEBUG("READ, MORE THAN ONE MCA\n\n");
249#endif
250}
251
252static inline void mon_next_mca(struct mon_msg *monmsg) 210static inline void mon_next_mca(struct mon_msg *monmsg)
253{ 211{
254 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) 212 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
255 return; 213 return;
256 P_DEBUG("READ, NEXT MCA\n\n");
257 monmsg->mca_offset += 12; 214 monmsg->mca_offset += 12;
258 monmsg->pos = 0; 215 monmsg->pos = 0;
259} 216}
@@ -270,7 +227,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv)
270 monmsg->msglim_reached = 0; 227 monmsg->msglim_reached = 0;
271 monmsg->pos = 0; 228 monmsg->pos = 0;
272 monmsg->mca_offset = 0; 229 monmsg->mca_offset = 0;
273 P_WARNING("read, message limit reached\n");
274 monpriv->read_index = (monpriv->read_index + 1) % 230 monpriv->read_index = (monpriv->read_index + 1) %
275 MON_MSGLIM; 231 MON_MSGLIM;
276 atomic_dec(&monpriv->read_ready); 232 atomic_dec(&monpriv->read_ready);
@@ -287,10 +243,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
287{ 243{
288 struct mon_private *monpriv = path->private; 244 struct mon_private *monpriv = path->private;
289 245
290 P_DEBUG("IUCV connection completed\n");
291 P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
292 "0x%02X, Sample = 0x%02X\n",
293 ipuser[0], ipuser[1], ipuser[2]);
294 atomic_set(&monpriv->iucv_connected, 1); 246 atomic_set(&monpriv->iucv_connected, 1);
295 wake_up(&mon_conn_wait_queue); 247 wake_up(&mon_conn_wait_queue);
296} 248}
@@ -311,7 +263,6 @@ static void mon_iucv_message_pending(struct iucv_path *path,
311{ 263{
312 struct mon_private *monpriv = path->private; 264 struct mon_private *monpriv = path->private;
313 265
314 P_DEBUG("IUCV message pending\n");
315 memcpy(&monpriv->msg_array[monpriv->write_index]->msg, 266 memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
316 msg, sizeof(*msg)); 267 msg, sizeof(*msg));
317 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { 268 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
@@ -377,7 +328,6 @@ static int mon_open(struct inode *inode, struct file *filp)
377 rc = -EIO; 328 rc = -EIO;
378 goto out_path; 329 goto out_path;
379 } 330 }
380 P_INFO("open, established connection to *MONITOR service\n\n");
381 filp->private_data = monpriv; 331 filp->private_data = monpriv;
382 unlock_kernel(); 332 unlock_kernel();
383 return nonseekable_open(inode, filp); 333 return nonseekable_open(inode, filp);
@@ -404,8 +354,6 @@ static int mon_close(struct inode *inode, struct file *filp)
404 rc = iucv_path_sever(monpriv->path, user_data_sever); 354 rc = iucv_path_sever(monpriv->path, user_data_sever);
405 if (rc) 355 if (rc)
406 P_ERROR("close, iucv_sever failed with rc = %i\n", rc); 356 P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
407 else
408 P_INFO("close, terminated connection to *MONITOR service\n");
409 357
410 atomic_set(&monpriv->iucv_severed, 0); 358 atomic_set(&monpriv->iucv_severed, 0);
411 atomic_set(&monpriv->iucv_connected, 0); 359 atomic_set(&monpriv->iucv_connected, 0);
@@ -446,10 +394,8 @@ static ssize_t mon_read(struct file *filp, char __user *data,
446 monmsg = monpriv->msg_array[monpriv->read_index]; 394 monmsg = monpriv->msg_array[monpriv->read_index];
447 } 395 }
448 396
449 if (!monmsg->pos) { 397 if (!monmsg->pos)
450 monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset; 398 monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
451 mon_read_debug(monmsg, monpriv);
452 }
453 if (mon_check_mca(monmsg)) 399 if (mon_check_mca(monmsg))
454 goto reply; 400 goto reply;
455 401
@@ -535,7 +481,6 @@ static int __init mon_init(void)
535 P_ERROR("failed to register with iucv driver\n"); 481 P_ERROR("failed to register with iucv driver\n");
536 return rc; 482 return rc;
537 } 483 }
538 P_INFO("open, registered with IUCV\n");
539 484
540 rc = segment_type(mon_dcss_name); 485 rc = segment_type(mon_dcss_name);
541 if (rc < 0) { 486 if (rc < 0) {
@@ -559,13 +504,8 @@ static int __init mon_init(void)
559 dcss_mkname(mon_dcss_name, &user_data_connect[8]); 504 dcss_mkname(mon_dcss_name, &user_data_connect[8]);
560 505
561 rc = misc_register(&mon_dev); 506 rc = misc_register(&mon_dev);
562 if (rc < 0 ) { 507 if (rc < 0 )
563 P_ERROR("misc_register failed, rc = %i\n", rc);
564 goto out; 508 goto out;
565 }
566 P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
567 mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
568 mon_dcss_end - mon_dcss_start + 1);
569 return 0; 509 return 0;
570 510
571out: 511out:
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 848ef7e8523f..81a96e019080 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
153 struct raw3270_request *rq; 153 struct raw3270_request *rq;
154 154
155 rq = alloc_bootmem_low(sizeof(struct raw3270)); 155 rq = alloc_bootmem_low(sizeof(struct raw3270));
156 if (!rq)
157 return ERR_PTR(-ENOMEM);
158 memset(rq, 0, sizeof(struct raw3270_request));
159 156
160 /* alloc output buffer. */ 157 /* alloc output buffer. */
161 if (size > 0) { 158 if (size > 0)
162 rq->buffer = alloc_bootmem_low(size); 159 rq->buffer = alloc_bootmem_low(size);
163 if (!rq->buffer) {
164 free_bootmem((unsigned long) rq,
165 sizeof(struct raw3270));
166 return ERR_PTR(-ENOMEM);
167 }
168 }
169 rq->size = size; 160 rq->size = size;
170 INIT_LIST_HEAD(&rq->list); 161 INIT_LIST_HEAD(&rq->list);
171 162
@@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
372 363
373 if (IS_ERR(irb)) 364 if (IS_ERR(irb))
374 rc = RAW3270_IO_RETRY; 365 rc = RAW3270_IO_RETRY;
375 else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { 366 else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
376 rq->rc = -EIO; 367 rq->rc = -EIO;
377 rc = RAW3270_IO_DONE; 368 rc = RAW3270_IO_DONE;
378 } else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | 369 } else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
379 DEV_STAT_UNIT_EXCEP)) { 370 DEV_STAT_UNIT_EXCEP)) {
380 /* Handle CE-DE-UE and subsequent UDE */ 371 /* Handle CE-DE-UE and subsequent UDE */
381 set_bit(RAW3270_FLAGS_BUSY, &rp->flags); 372 set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
382 rc = RAW3270_IO_BUSY; 373 rc = RAW3270_IO_BUSY;
383 } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) { 374 } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
384 /* Wait for UDE if busy flag is set. */ 375 /* Wait for UDE if busy flag is set. */
385 if (irb->scsw.dstat & DEV_STAT_DEV_END) { 376 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
386 clear_bit(RAW3270_FLAGS_BUSY, &rp->flags); 377 clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
387 /* Got it, now retry. */ 378 /* Got it, now retry. */
388 rc = RAW3270_IO_RETRY; 379 rc = RAW3270_IO_RETRY;
@@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
497 * Unit-Check Processing: 488 * Unit-Check Processing:
498 * Expect Command Reject or Intervention Required. 489 * Expect Command Reject or Intervention Required.
499 */ 490 */
500 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 491 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
501 /* Request finished abnormally. */ 492 /* Request finished abnormally. */
502 if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { 493 if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
503 set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); 494 set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
@@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
505 } 496 }
506 } 497 }
507 if (rq) { 498 if (rq) {
508 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 499 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
509 if (irb->ecw[0] & SNS0_CMD_REJECT) 500 if (irb->ecw[0] & SNS0_CMD_REJECT)
510 rq->rc = -EOPNOTSUPP; 501 rq->rc = -EOPNOTSUPP;
511 else 502 else
512 rq->rc = -EIO; 503 rq->rc = -EIO;
513 } else 504 } else
514 /* Request finished normally. Copy residual count. */ 505 /* Request finished normally. Copy residual count. */
515 rq->rescnt = irb->scsw.count; 506 rq->rescnt = irb->scsw.cmd.count;
516 } 507 }
517 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 508 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
518 set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); 509 set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
519 wake_up(&raw3270_wait_queue); 510 wake_up(&raw3270_wait_queue);
520 } 511 }
@@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp)
619 rp->cols = 132; 610 rp->cols = 132;
620 break; 611 break;
621 default: 612 default:
622 printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
623 rc = -EOPNOTSUPP; 613 rc = -EOPNOTSUPP;
624 break; 614 break;
625 } 615 }
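
The raw3270 bootmem hunk removes error handling that could never run: alloc_bootmem_low() and friends panic on allocation failure instead of returning NULL, and they hand back zeroed memory, so both the NULL checks and the memset were dead weight. The slimmed-down shape as a sketch:

#include <linux/bootmem.h>

static void *demo_early_buffer(size_t size)
{
	/* zeroed on return; a failed boot-time allocation panics,
	 * so there is no NULL case to handle */
	return alloc_bootmem_low(size);
}
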
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 2c7a1ee6b041..3c8b25e6c345 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
506 if (scbuf->validity_sclp_send_mask) 506 if (scbuf->validity_sclp_send_mask)
507 sclp_send_mask = scbuf->sclp_send_mask; 507 sclp_send_mask = scbuf->sclp_send_mask;
508 spin_unlock_irqrestore(&sclp_lock, flags); 508 spin_unlock_irqrestore(&sclp_lock, flags);
509 if (scbuf->validity_sclp_active_facility_mask)
510 sclp_facilities = scbuf->sclp_active_facility_mask;
509 sclp_dispatch_state_change(); 511 sclp_dispatch_state_change();
510} 512}
511 513
@@ -782,11 +784,9 @@ sclp_check_handler(__u16 code)
782 /* Is this the interrupt we are waiting for? */ 784 /* Is this the interrupt we are waiting for? */
783 if (finished_sccb == 0) 785 if (finished_sccb == 0)
784 return; 786 return;
785 if (finished_sccb != (u32) (addr_t) sclp_init_sccb) { 787 if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
786 printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt " 788 panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
787 "for buffer at 0x%x\n", finished_sccb); 789 finished_sccb);
788 return;
789 }
790 spin_lock(&sclp_lock); 790 spin_lock(&sclp_lock);
791 if (sclp_running_state == sclp_running_state_running) { 791 if (sclp_running_state == sclp_running_state_running) {
792 sclp_init_req.status = SCLP_REQ_DONE; 792 sclp_init_req.status = SCLP_REQ_DONE;
@@ -883,8 +883,6 @@ sclp_init(void)
883 unsigned long flags; 883 unsigned long flags;
884 int rc; 884 int rc;
885 885
886 if (!MACHINE_HAS_SCLP)
887 return -ENODEV;
888 spin_lock_irqsave(&sclp_lock, flags); 886 spin_lock_irqsave(&sclp_lock, flags);
889 /* Check for previous or running initialization */ 887 /* Check for previous or running initialization */
890 if (sclp_init_state != sclp_init_state_uninitialized) { 888 if (sclp_init_state != sclp_init_state_uninitialized) {
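
In sclp.c the state-change callback now also refreshes the cached facility mask, gated on the buffer's validity bit like the masks before it, and an unsolicited init interrupt is promoted from a warning to a panic. The validity-gated copy pattern, with shortened illustrative names (the real event buffer has more fields):

struct demo_scbuf {			/* simplified event buffer */
	unsigned int validity_send_mask:1;
	unsigned int validity_facility_mask:1;
	unsigned int send_mask;
	unsigned long long facility_mask;
};

static void demo_state_change(const struct demo_scbuf *scbuf,
			      unsigned int *cached_send_mask,
			      unsigned long long *cached_facilities)
{
	/* only fields flagged valid may overwrite the cached copies */
	if (scbuf->validity_send_mask)
		*cached_send_mask = scbuf->send_mask;
	if (scbuf->validity_facility_mask)
		*cached_facilities = scbuf->facility_mask;
}
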
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index b5c23396f8fe..0c2b77493db4 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -11,6 +11,9 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/mm.h>
15#include <linux/mmzone.h>
16#include <linux/memory.h>
14#include <asm/chpid.h> 17#include <asm/chpid.h>
15#include <asm/sclp.h> 18#include <asm/sclp.h>
16#include "sclp.h" 19#include "sclp.h"
@@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid;
43 46
44u64 sclp_facilities; 47u64 sclp_facilities;
45static u8 sclp_fac84; 48static u8 sclp_fac84;
49static unsigned long long rzm;
50static unsigned long long rnmax;
46 51
47static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) 52static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
48{ 53{
@@ -62,7 +67,7 @@ out:
62 return rc; 67 return rc;
63} 68}
64 69
65void __init sclp_read_info_early(void) 70static void __init sclp_read_info_early(void)
66{ 71{
67 int rc; 72 int rc;
68 int i; 73 int i;
@@ -92,34 +97,33 @@ void __init sclp_read_info_early(void)
92 97
93void __init sclp_facilities_detect(void) 98void __init sclp_facilities_detect(void)
94{ 99{
100 struct read_info_sccb *sccb;
101
102 sclp_read_info_early();
95 if (!early_read_info_sccb_valid) 103 if (!early_read_info_sccb_valid)
96 return; 104 return;
97 sclp_facilities = early_read_info_sccb.facilities; 105
98 sclp_fac84 = early_read_info_sccb.fac84; 106 sccb = &early_read_info_sccb;
107 sclp_facilities = sccb->facilities;
108 sclp_fac84 = sccb->fac84;
109 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
110 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
111 rzm <<= 20;
99} 112}
100 113
101unsigned long long __init sclp_memory_detect(void) 114unsigned long long sclp_get_rnmax(void)
102{ 115{
103 unsigned long long memsize; 116 return rnmax;
104 struct read_info_sccb *sccb; 117}
105 118
106 if (!early_read_info_sccb_valid) 119unsigned long long sclp_get_rzm(void)
107 return 0; 120{
108 sccb = &early_read_info_sccb; 121 return rzm;
109 if (sccb->rnsize)
110 memsize = sccb->rnsize << 20;
111 else
112 memsize = sccb->rnsize2 << 20;
113 if (sccb->rnmax)
114 memsize *= sccb->rnmax;
115 else
116 memsize *= sccb->rnmax2;
117 return memsize;
118} 122}
119 123
120/* 124/*
121 * This function will be called after sclp_memory_detect(), which gets called 125 * This function will be called after sclp_facilities_detect(), which gets
122 * early from early.c code. Therefore the sccb should have valid contents. 126 * called from early.c code. Therefore the sccb should have valid contents.
123 */ 127 */
124void __init sclp_get_ipl_info(struct sclp_ipl_info *info) 128void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
125{ 129{
@@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu)
278 return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); 282 return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
279} 283}
280 284
285#ifdef CONFIG_MEMORY_HOTPLUG
286
287static DEFINE_MUTEX(sclp_mem_mutex);
288static LIST_HEAD(sclp_mem_list);
289static u8 sclp_max_storage_id;
290static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
291
292struct memory_increment {
293 struct list_head list;
294 u16 rn;
295 int standby;
296 int usecount;
297};
298
299struct assign_storage_sccb {
300 struct sccb_header header;
301 u16 rn;
302} __packed;
303
304static unsigned long long rn2addr(u16 rn)
305{
306 return (unsigned long long) (rn - 1) * rzm;
307}
308
309static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
310{
311 struct assign_storage_sccb *sccb;
312 int rc;
313
314 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
315 if (!sccb)
316 return -ENOMEM;
317 sccb->header.length = PAGE_SIZE;
318 sccb->rn = rn;
319 rc = do_sync_request(cmd, sccb);
320 if (rc)
321 goto out;
322 switch (sccb->header.response_code) {
323 case 0x0020:
324 case 0x0120:
325 break;
326 default:
327 rc = -EIO;
328 break;
329 }
330out:
331 free_page((unsigned long) sccb);
332 return rc;
333}
334
335static int sclp_assign_storage(u16 rn)
336{
337 return do_assign_storage(0x000d0001, rn);
338}
339
340static int sclp_unassign_storage(u16 rn)
341{
342 return do_assign_storage(0x000c0001, rn);
343}
344
345struct attach_storage_sccb {
346 struct sccb_header header;
347 u16 :16;
348 u16 assigned;
349 u32 :32;
350 u32 entries[0];
351} __packed;
352
353static int sclp_attach_storage(u8 id)
354{
355 struct attach_storage_sccb *sccb;
356 int rc;
357 int i;
358
359 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
360 if (!sccb)
361 return -ENOMEM;
362 sccb->header.length = PAGE_SIZE;
363 rc = do_sync_request(0x00080001 | id << 8, sccb);
364 if (rc)
365 goto out;
366 switch (sccb->header.response_code) {
367 case 0x0020:
368 set_bit(id, sclp_storage_ids);
369 for (i = 0; i < sccb->assigned; i++)
370 sclp_unassign_storage(sccb->entries[i] >> 16);
371 break;
372 default:
373 rc = -EIO;
374 break;
375 }
376out:
377 free_page((unsigned long) sccb);
378 return rc;
379}
380
381static int sclp_mem_change_state(unsigned long start, unsigned long size,
382 int online)
383{
384 struct memory_increment *incr;
385 unsigned long long istart;
386 int rc = 0;
387
388 list_for_each_entry(incr, &sclp_mem_list, list) {
389 istart = rn2addr(incr->rn);
390 if (start + size - 1 < istart)
391 break;
392 if (start > istart + rzm - 1)
393 continue;
394 if (online) {
395 if (incr->usecount++)
396 continue;
397 /*
398 * Don't break the loop if one assign fails. Loop may
399 * be walked again on CANCEL and we can't save
400 * information if state changed before or not.
401 * So continue and increase usecount for all increments.
402 */
403 rc |= sclp_assign_storage(incr->rn);
404 } else {
405 if (--incr->usecount)
406 continue;
407 sclp_unassign_storage(incr->rn);
408 }
409 }
410 return rc ? -EIO : 0;
411}
412
413static int sclp_mem_notifier(struct notifier_block *nb,
414 unsigned long action, void *data)
415{
416 unsigned long start, size;
417 struct memory_notify *arg;
418 unsigned char id;
419 int rc = 0;
420
421 arg = data;
422 start = arg->start_pfn << PAGE_SHIFT;
423 size = arg->nr_pages << PAGE_SHIFT;
424 mutex_lock(&sclp_mem_mutex);
425 for (id = 0; id <= sclp_max_storage_id; id++)
426 if (!test_bit(id, sclp_storage_ids))
427 sclp_attach_storage(id);
428 switch (action) {
429 case MEM_ONLINE:
430 break;
431 case MEM_GOING_ONLINE:
432 rc = sclp_mem_change_state(start, size, 1);
433 break;
434 case MEM_CANCEL_ONLINE:
435 sclp_mem_change_state(start, size, 0);
436 break;
437 default:
438 rc = -EINVAL;
439 break;
440 }
441 mutex_unlock(&sclp_mem_mutex);
442 return rc ? NOTIFY_BAD : NOTIFY_OK;
443}
444
445static struct notifier_block sclp_mem_nb = {
446 .notifier_call = sclp_mem_notifier,
447};
448
449static void __init add_memory_merged(u16 rn)
450{
451 static u16 first_rn, num;
452 unsigned long long start, size;
453
454 if (rn && first_rn && (first_rn + num == rn)) {
455 num++;
456 return;
457 }
458 if (!first_rn)
459 goto skip_add;
460 start = rn2addr(first_rn);
461 size = (unsigned long long) num * rzm;
462 if (start >= VMEM_MAX_PHYS)
463 goto skip_add;
464 if (start + size > VMEM_MAX_PHYS)
465 size = VMEM_MAX_PHYS - start;
466 add_memory(0, start, size);
467skip_add:
468 first_rn = rn;
469 num = 1;
470}
471
472static void __init sclp_add_standby_memory(void)
473{
474 struct memory_increment *incr;
475
476 list_for_each_entry(incr, &sclp_mem_list, list)
477 if (incr->standby)
478 add_memory_merged(incr->rn);
479 add_memory_merged(0);
480}
481
482static void __init insert_increment(u16 rn, int standby, int assigned)
483{
484 struct memory_increment *incr, *new_incr;
485 struct list_head *prev;
486 u16 last_rn;
487
488 new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
489 if (!new_incr)
490 return;
491 new_incr->rn = rn;
492 new_incr->standby = standby;
493 last_rn = 0;
494 prev = &sclp_mem_list;
495 list_for_each_entry(incr, &sclp_mem_list, list) {
496 if (assigned && incr->rn > rn)
497 break;
498 if (!assigned && incr->rn - last_rn > 1)
499 break;
500 last_rn = incr->rn;
501 prev = &incr->list;
502 }
503 if (!assigned)
504 new_incr->rn = last_rn + 1;
505 if (new_incr->rn > rnmax) {
506 kfree(new_incr);
507 return;
508 }
509 list_add(&new_incr->list, prev);
510}
511
512struct read_storage_sccb {
513 struct sccb_header header;
514 u16 max_id;
515 u16 assigned;
516 u16 standby;
517 u16 :16;
518 u32 entries[0];
519} __packed;
520
521static int __init sclp_detect_standby_memory(void)
522{
523 struct read_storage_sccb *sccb;
524 int i, id, assigned, rc;
525
526 if (!early_read_info_sccb_valid)
527 return 0;
528 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
529 return 0;
530 rc = -ENOMEM;
531 sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
532 if (!sccb)
533 goto out;
534 assigned = 0;
535 for (id = 0; id <= sclp_max_storage_id; id++) {
536 memset(sccb, 0, PAGE_SIZE);
537 sccb->header.length = PAGE_SIZE;
538 rc = do_sync_request(0x00040001 | id << 8, sccb);
539 if (rc)
540 goto out;
541 switch (sccb->header.response_code) {
542 case 0x0010:
543 set_bit(id, sclp_storage_ids);
544 for (i = 0; i < sccb->assigned; i++) {
545 if (!sccb->entries[i])
546 continue;
547 assigned++;
548 insert_increment(sccb->entries[i] >> 16, 0, 1);
549 }
550 break;
551 case 0x0310:
552 break;
553 case 0x0410:
554 for (i = 0; i < sccb->assigned; i++) {
555 if (!sccb->entries[i])
556 continue;
557 assigned++;
558 insert_increment(sccb->entries[i] >> 16, 1, 1);
559 }
560 break;
561 default:
562 rc = -EIO;
563 break;
564 }
565 if (!rc)
566 sclp_max_storage_id = sccb->max_id;
567 }
568 if (rc || list_empty(&sclp_mem_list))
569 goto out;
570 for (i = 1; i <= rnmax - assigned; i++)
571 insert_increment(0, 1, 0);
572 rc = register_memory_notifier(&sclp_mem_nb);
573 if (rc)
574 goto out;
575 sclp_add_standby_memory();
576out:
577 free_page((unsigned long) sccb);
578 return rc;
579}
580__initcall(sclp_detect_standby_memory);
581
582#endif /* CONFIG_MEMORY_HOTPLUG */
583
281/* 584/*
282 * Channel path configuration related functions. 585 * Channel path configuration related functions.
283 */ 586 */
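
The block added to sclp_cmd.c wires standby storage into the generic memory-hotplug machinery: increments are tracked in a list, MEM_GOING_ONLINE must assign backing storage (and may veto the transition), and MEM_CANCEL_ONLINE rolls the assignment back. A condensed sketch of that notifier flow; the demo_* helpers stand in for the SCLP assign/unassign calls and are not from the patch.

#include <linux/errno.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/notifier.h>

static int demo_assign_range(unsigned long start, unsigned long size)
{
	return 0;	/* stand-in for sclp_assign_storage() over the range */
}

static void demo_unassign_range(unsigned long start, unsigned long size)
{
	/* stand-in for sclp_unassign_storage() over the range */
}

static int demo_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct memory_notify *arg = data;
	unsigned long start = arg->start_pfn << PAGE_SHIFT;
	unsigned long size = arg->nr_pages << PAGE_SHIFT;
	int rc = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		rc = demo_assign_range(start, size);	/* may veto */
		break;
	case MEM_CANCEL_ONLINE:
		demo_unassign_range(start, size);	/* roll back */
		break;
	case MEM_ONLINE:
		break;			/* nothing left to do */
	default:
		rc = -EINVAL;
		break;
	}
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block demo_mem_nb = {
	.notifier_call = demo_mem_notifier,
};

/* registration, as in the patch: register_memory_notifier(&demo_mem_nb); */
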
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index ead1043d788e..7e619c534bf4 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -14,14 +14,13 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/jiffies.h> 15#include <linux/jiffies.h>
16#include <linux/bootmem.h> 16#include <linux/bootmem.h>
17#include <linux/termios.h>
17#include <linux/err.h> 18#include <linux/err.h>
18 19
19#include "sclp.h" 20#include "sclp.h"
20#include "sclp_rw.h" 21#include "sclp_rw.h"
21#include "sclp_tty.h" 22#include "sclp_tty.h"
22 23
23#define SCLP_CON_PRINT_HEADER "sclp console driver: "
24
25#define sclp_console_major 4 /* TTYAUX_MAJOR */ 24#define sclp_console_major 4 /* TTYAUX_MAJOR */
26#define sclp_console_minor 64 25#define sclp_console_minor 64
27#define sclp_console_name "ttyS" 26#define sclp_console_name "ttyS"
@@ -222,8 +221,6 @@ sclp_console_init(void)
222 INIT_LIST_HEAD(&sclp_con_pages); 221 INIT_LIST_HEAD(&sclp_con_pages);
223 for (i = 0; i < MAX_CONSOLE_PAGES; i++) { 222 for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
224 page = alloc_bootmem_low_pages(PAGE_SIZE); 223 page = alloc_bootmem_low_pages(PAGE_SIZE);
225 if (page == NULL)
226 return -ENOMEM;
227 list_add_tail((struct list_head *) page, &sclp_con_pages); 224 list_add_tail((struct list_head *) page, &sclp_con_pages);
228 } 225 }
229 INIT_LIST_HEAD(&sclp_con_outqueue); 226 INIT_LIST_HEAD(&sclp_con_outqueue);
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index ad05a87bc480..fff4ff485d9b 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -8,6 +8,7 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11#include <linux/kthread.h>
11#include <linux/sysdev.h> 12#include <linux/sysdev.h>
12#include <linux/workqueue.h> 13#include <linux/workqueue.h>
13#include <asm/smp.h> 14#include <asm/smp.h>
@@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
40 put_online_cpus(); 41 put_online_cpus();
41} 42}
42 43
43static void __ref sclp_cpu_change_notify(struct work_struct *work) 44static int sclp_cpu_kthread(void *data)
44{ 45{
45 smp_rescan_cpus(); 46 smp_rescan_cpus();
47 return 0;
48}
49
50static void __ref sclp_cpu_change_notify(struct work_struct *work)
51{
52 /* Can't call smp_rescan_cpus() from workqueue context since it may
53 * deadlock in case of cpu hotplug. So we have to create a kernel
54 * thread in order to call it.
55 */
56 kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan");
46} 57}
47 58
48static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) 59static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
@@ -74,10 +85,8 @@ static int __init sclp_conf_init(void)
74 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); 85 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
75 86
76 rc = sclp_register(&sclp_conf_register); 87 rc = sclp_register(&sclp_conf_register);
77 if (rc) { 88 if (rc)
78 printk(KERN_ERR TAG "failed to register (%d).\n", rc);
79 return rc; 89 return rc;
80 }
81 90
82 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { 91 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
83 printk(KERN_WARNING TAG "no configuration management.\n"); 92 printk(KERN_WARNING TAG "no configuration management.\n");
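
sclp_config.c stops calling smp_rescan_cpus() directly from the workqueue: rescanning can block on CPU hotplug, which may in turn wait on the same workqueue, so the call is handed off to a short-lived kernel thread. The hand-off pattern as a sketch with invented names:

#include <linux/kthread.h>
#include <linux/workqueue.h>

static int demo_rescan_thread(void *data)
{
	/* the real driver calls smp_rescan_cpus() here */
	return 0;
}

static void demo_change_notify(struct work_struct *work)
{
	/* not safe to rescan from workqueue context (hotplug deadlock);
	 * run it from its own kernel thread instead */
	kthread_run(demo_rescan_thread, NULL, "demo_rescan");
}
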
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 9f37456222e9..d887bd261d28 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -27,6 +27,8 @@
27#define CPI_LENGTH_NAME 8 27#define CPI_LENGTH_NAME 8
28#define CPI_LENGTH_LEVEL 16 28#define CPI_LENGTH_LEVEL 16
29 29
30static DEFINE_MUTEX(sclp_cpi_mutex);
31
30struct cpi_evbuf { 32struct cpi_evbuf {
31 struct evbuf_header header; 33 struct evbuf_header header;
32 u8 id_format; 34 u8 id_format;
@@ -124,21 +126,15 @@ static int cpi_req(void)
124 int response; 126 int response;
125 127
126 rc = sclp_register(&sclp_cpi_event); 128 rc = sclp_register(&sclp_cpi_event);
127 if (rc) { 129 if (rc)
128 printk(KERN_WARNING "cpi: could not register "
129 "to hardware console.\n");
130 goto out; 130 goto out;
131 }
132 if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { 131 if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
133 printk(KERN_WARNING "cpi: no control program "
134 "identification support\n");
135 rc = -EOPNOTSUPP; 132 rc = -EOPNOTSUPP;
136 goto out_unregister; 133 goto out_unregister;
137 } 134 }
138 135
139 req = cpi_prepare_req(); 136 req = cpi_prepare_req();
140 if (IS_ERR(req)) { 137 if (IS_ERR(req)) {
141 printk(KERN_WARNING "cpi: could not allocate request\n");
142 rc = PTR_ERR(req); 138 rc = PTR_ERR(req);
143 goto out_unregister; 139 goto out_unregister;
144 } 140 }
@@ -148,10 +144,8 @@ static int cpi_req(void)
148 144
149 /* Add request to sclp queue */ 145 /* Add request to sclp queue */
150 rc = sclp_add_request(req); 146 rc = sclp_add_request(req);
151 if (rc) { 147 if (rc)
152 printk(KERN_WARNING "cpi: could not start request\n");
153 goto out_free_req; 148 goto out_free_req;
154 }
155 149
156 wait_for_completion(&completion); 150 wait_for_completion(&completion);
157 151
@@ -223,7 +217,12 @@ static void set_string(char *attr, const char *value)
223static ssize_t system_name_show(struct kobject *kobj, 217static ssize_t system_name_show(struct kobject *kobj,
224 struct kobj_attribute *attr, char *page) 218 struct kobj_attribute *attr, char *page)
225{ 219{
226 return snprintf(page, PAGE_SIZE, "%s\n", system_name); 220 int rc;
221
222 mutex_lock(&sclp_cpi_mutex);
223 rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
224 mutex_unlock(&sclp_cpi_mutex);
225 return rc;
227} 226}
228 227
229static ssize_t system_name_store(struct kobject *kobj, 228static ssize_t system_name_store(struct kobject *kobj,
@@ -237,7 +236,9 @@ static ssize_t system_name_store(struct kobject *kobj,
237 if (rc) 236 if (rc)
238 return rc; 237 return rc;
239 238
239 mutex_lock(&sclp_cpi_mutex);
240 set_string(system_name, buf); 240 set_string(system_name, buf);
241 mutex_unlock(&sclp_cpi_mutex);
241 242
242 return len; 243 return len;
243} 244}
@@ -248,7 +249,12 @@ static struct kobj_attribute system_name_attr =
248static ssize_t sysplex_name_show(struct kobject *kobj, 249static ssize_t sysplex_name_show(struct kobject *kobj,
249 struct kobj_attribute *attr, char *page) 250 struct kobj_attribute *attr, char *page)
250{ 251{
251 return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); 252 int rc;
253
254 mutex_lock(&sclp_cpi_mutex);
255 rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
256 mutex_unlock(&sclp_cpi_mutex);
257 return rc;
252} 258}
253 259
254static ssize_t sysplex_name_store(struct kobject *kobj, 260static ssize_t sysplex_name_store(struct kobject *kobj,
@@ -262,7 +268,9 @@ static ssize_t sysplex_name_store(struct kobject *kobj,
262 if (rc) 268 if (rc)
263 return rc; 269 return rc;
264 270
271 mutex_lock(&sclp_cpi_mutex);
265 set_string(sysplex_name, buf); 272 set_string(sysplex_name, buf);
273 mutex_unlock(&sclp_cpi_mutex);
266 274
267 return len; 275 return len;
268} 276}
@@ -273,7 +281,12 @@ static struct kobj_attribute sysplex_name_attr =
273static ssize_t system_type_show(struct kobject *kobj, 281static ssize_t system_type_show(struct kobject *kobj,
274 struct kobj_attribute *attr, char *page) 282 struct kobj_attribute *attr, char *page)
275{ 283{
276 return snprintf(page, PAGE_SIZE, "%s\n", system_type); 284 int rc;
285
286 mutex_lock(&sclp_cpi_mutex);
287 rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
288 mutex_unlock(&sclp_cpi_mutex);
289 return rc;
277} 290}
278 291
279static ssize_t system_type_store(struct kobject *kobj, 292static ssize_t system_type_store(struct kobject *kobj,
@@ -287,7 +300,9 @@ static ssize_t system_type_store(struct kobject *kobj,
287 if (rc) 300 if (rc)
288 return rc; 301 return rc;
289 302
303 mutex_lock(&sclp_cpi_mutex);
290 set_string(system_type, buf); 304 set_string(system_type, buf);
305 mutex_unlock(&sclp_cpi_mutex);
291 306
292 return len; 307 return len;
293} 308}
@@ -298,8 +313,11 @@ static struct kobj_attribute system_type_attr =
298static ssize_t system_level_show(struct kobject *kobj, 313static ssize_t system_level_show(struct kobject *kobj,
299 struct kobj_attribute *attr, char *page) 314 struct kobj_attribute *attr, char *page)
300{ 315{
301 unsigned long long level = system_level; 316 unsigned long long level;
302 317
318 mutex_lock(&sclp_cpi_mutex);
319 level = system_level;
320 mutex_unlock(&sclp_cpi_mutex);
303 return snprintf(page, PAGE_SIZE, "%#018llx\n", level); 321 return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
304} 322}
305 323
@@ -320,8 +338,9 @@ static ssize_t system_level_store(struct kobject *kobj,
320 if (*endp) 338 if (*endp)
321 return -EINVAL; 339 return -EINVAL;
322 340
341 mutex_lock(&sclp_cpi_mutex);
323 system_level = level; 342 system_level = level;
324 343 mutex_unlock(&sclp_cpi_mutex);
325 return len; 344 return len;
326} 345}
327 346
@@ -334,7 +353,9 @@ static ssize_t set_store(struct kobject *kobj,
334{ 353{
335 int rc; 354 int rc;
336 355
356 mutex_lock(&sclp_cpi_mutex);
337 rc = cpi_req(); 357 rc = cpi_req();
358 mutex_unlock(&sclp_cpi_mutex);
338 if (rc) 359 if (rc)
339 return rc; 360 return rc;
340 361
@@ -373,12 +394,16 @@ int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
373 if (rc) 394 if (rc)
374 return rc; 395 return rc;
375 396
397 mutex_lock(&sclp_cpi_mutex);
376 set_string(system_name, system); 398 set_string(system_name, system);
377 set_string(sysplex_name, sysplex); 399 set_string(sysplex_name, sysplex);
378 set_string(system_type, type); 400 set_string(system_type, type);
379 system_level = level; 401 system_level = level;
380 402
381 return cpi_req(); 403 rc = cpi_req();
404 mutex_unlock(&sclp_cpi_mutex);
405
406 return rc;
382} 407}
383EXPORT_SYMBOL(sclp_cpi_set_data); 408EXPORT_SYMBOL(sclp_cpi_set_data);
384 409
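
The new sclp_cpi_mutex serializes every reader and writer of the CPI attributes, so a concurrent show() can no longer observe a half-written name. A minimal sketch of the scheme with one hypothetical attribute (buffer size and names are illustrative):

    #include <linux/kobject.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(attr_mutex);
    static char name_buf[9];                /* 8 chars + NUL, like CPI names */

    static ssize_t name_show(struct kobject *kobj,
                             struct kobj_attribute *attr, char *page)
    {
            ssize_t rc;

            mutex_lock(&attr_mutex);
            rc = snprintf(page, PAGE_SIZE, "%s\n", name_buf);
            mutex_unlock(&attr_mutex);
            return rc;
    }

    static ssize_t name_store(struct kobject *kobj,
                              struct kobj_attribute *attr,
                              const char *buf, size_t len)
    {
            mutex_lock(&attr_mutex);
            snprintf(name_buf, sizeof(name_buf), "%s", buf);
            mutex_unlock(&attr_mutex);
            return len;
    }
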
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 45ff25e787cb..84c191c1cd62 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -51,13 +51,7 @@ static struct sclp_register sclp_quiesce_event = {
51static int __init 51static int __init
52sclp_quiesce_init(void) 52sclp_quiesce_init(void)
53{ 53{
54 int rc; 54 return sclp_register(&sclp_quiesce_event);
55
56 rc = sclp_register(&sclp_quiesce_event);
57 if (rc)
58 printk(KERN_WARNING "sclp: could not register quiesce handler "
59 "(rc=%d)\n", rc);
60 return rc;
61} 55}
62 56
63module_init(sclp_quiesce_init); 57module_init(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index da09781b32f7..710af42603f8 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -19,8 +19,6 @@
19#include "sclp.h" 19#include "sclp.h"
20#include "sclp_rw.h" 20#include "sclp_rw.h"
21 21
22#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
23
24/* 22/*
25 * The room for the SCCB (only for writing) is not equal to a page's size 23 * The room for the SCCB (only for writing) is not equal to a page's size
26 * (as it is specified as the maximum size in the SCLP documentation) 24 * (as it is specified as the maximum size in the SCLP documentation)
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 1c064976b32b..8b854857ba07 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -239,10 +239,8 @@ int __init sclp_sdias_init(void)
239 debug_register_view(sdias_dbf, &debug_sprintf_view); 239 debug_register_view(sdias_dbf, &debug_sprintf_view);
240 debug_set_level(sdias_dbf, 6); 240 debug_set_level(sdias_dbf, 6);
241 rc = sclp_register(&sclp_sdias_register); 241 rc = sclp_register(&sclp_sdias_register);
242 if (rc) { 242 if (rc)
243 ERROR_MSG("sclp register failed\n");
244 return rc; 243 return rc;
245 }
246 init_waitqueue_head(&sdias_wq); 244 init_waitqueue_head(&sdias_wq);
247 TRACE("init done\n"); 245 TRACE("init done\n");
248 return 0; 246 return 0;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 40b11521cd20..434ba04b1309 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -13,7 +13,6 @@
13#include <linux/tty.h> 13#include <linux/tty.h>
14#include <linux/tty_driver.h> 14#include <linux/tty_driver.h>
15#include <linux/tty_flip.h> 15#include <linux/tty_flip.h>
16#include <linux/wait.h>
17#include <linux/slab.h> 16#include <linux/slab.h>
18#include <linux/err.h> 17#include <linux/err.h>
19#include <linux/init.h> 18#include <linux/init.h>
@@ -25,8 +24,6 @@
25#include "sclp_rw.h" 24#include "sclp_rw.h"
26#include "sclp_tty.h" 25#include "sclp_tty.h"
27 26
28#define SCLP_TTY_PRINT_HEADER "sclp tty driver: "
29
30/* 27/*
31 * size of a buffer that collects single characters coming in 28 * size of a buffer that collects single characters coming in
32 * via sclp_tty_put_char() 29 * via sclp_tty_put_char()
@@ -50,8 +47,6 @@ static int sclp_tty_buffer_count;
50static struct sclp_buffer *sclp_ttybuf; 47static struct sclp_buffer *sclp_ttybuf;
51/* Timer for delayed output of console messages. */ 48/* Timer for delayed output of console messages. */
52static struct timer_list sclp_tty_timer; 49static struct timer_list sclp_tty_timer;
53/* Waitqueue to wait for buffers to get empty. */
54static wait_queue_head_t sclp_tty_waitq;
55 50
56static struct tty_struct *sclp_tty; 51static struct tty_struct *sclp_tty;
57static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; 52static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
@@ -59,19 +54,11 @@ static unsigned short int sclp_tty_chars_count;
59 54
60struct tty_driver *sclp_tty_driver; 55struct tty_driver *sclp_tty_driver;
61 56
62static struct sclp_ioctls sclp_ioctls; 57static int sclp_tty_tolower;
63static struct sclp_ioctls sclp_ioctls_init = 58static int sclp_tty_columns = 80;
64{ 59
65 8, /* 1 hor. tab. = 8 spaces */ 60#define SPACES_PER_TAB 8
66 0, /* no echo of input by this driver */ 61#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
67 80, /* 80 characters/line */
68 1, /* write after 1/10 s without final new line */
69 MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */
70 MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */
71 0, /* do not convert to lower case */
72 0x6c /* to seprate upper and lower case */
73 /* ('%' in EBCDIC) */
74};
75 62
76/* This routine is called whenever we try to open a SCLP terminal. */ 63/* This routine is called whenever we try to open a SCLP terminal. */
77static int 64static int
@@ -92,136 +79,6 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp)
92 sclp_tty = NULL; 79 sclp_tty = NULL;
93} 80}
94 81
95/* execute commands to control the i/o behaviour of the SCLP tty at runtime */
96static int
97sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
98 unsigned int cmd, unsigned long arg)
99{
100 unsigned long flags;
101 unsigned int obuf;
102 int check;
103 int rc;
104
105 if (tty->flags & (1 << TTY_IO_ERROR))
106 return -EIO;
107 rc = 0;
108 check = 0;
109 switch (cmd) {
110 case TIOCSCLPSHTAB:
111 /* set width of horizontal tab */
112 if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg))
113 rc = -EFAULT;
114 else
115 check = 1;
116 break;
117 case TIOCSCLPGHTAB:
118 /* get width of horizontal tab */
119 if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg))
120 rc = -EFAULT;
121 break;
122 case TIOCSCLPSECHO:
123 /* enable/disable echo of input */
124 if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg))
125 rc = -EFAULT;
126 break;
127 case TIOCSCLPGECHO:
128 /* Is echo of input enabled ? */
129 if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg))
130 rc = -EFAULT;
131 break;
132 case TIOCSCLPSCOLS:
133 /* set number of columns for output */
134 if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg))
135 rc = -EFAULT;
136 else
137 check = 1;
138 break;
139 case TIOCSCLPGCOLS:
140 /* get number of columns for output */
141 if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg))
142 rc = -EFAULT;
143 break;
144 case TIOCSCLPSNL:
145 /* enable/disable writing without final new line character */
146 if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg))
147 rc = -EFAULT;
148 break;
149 case TIOCSCLPGNL:
150 /* Is writing without final new line character enabled ? */
151 if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg))
152 rc = -EFAULT;
153 break;
154 case TIOCSCLPSOBUF:
155 /*
156 * set the maximum buffers size for output, will be rounded
157 * up to next 4kB boundary and stored as number of SCCBs
158 * (4kB Buffers) limitation: 256 x 4kB
159 */
160 if (get_user(obuf, (unsigned int __user *) arg) == 0) {
161 if (obuf & 0xFFF)
162 sclp_ioctls.max_sccb = (obuf >> 12) + 1;
163 else
164 sclp_ioctls.max_sccb = (obuf >> 12);
165 } else
166 rc = -EFAULT;
167 break;
168 case TIOCSCLPGOBUF:
169 /* get the maximum buffers size for output */
170 obuf = sclp_ioctls.max_sccb << 12;
171 if (put_user(obuf, (unsigned int __user *) arg))
172 rc = -EFAULT;
173 break;
174 case TIOCSCLPGKBUF:
175 /* get the number of buffers got from kernel at startup */
176 if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg))
177 rc = -EFAULT;
178 break;
179 case TIOCSCLPSCASE:
180 /* enable/disable conversion from upper to lower case */
181 if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
182 rc = -EFAULT;
183 break;
184 case TIOCSCLPGCASE:
185 /* Is conversion from upper to lower case of input enabled? */
186 if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
187 rc = -EFAULT;
188 break;
189 case TIOCSCLPSDELIM:
190 /*
191 * set special character used for separating upper and
192 * lower case, 0x00 disables this feature
193 */
194 if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg))
195 rc = -EFAULT;
196 break;
197 case TIOCSCLPGDELIM:
198 /*
199 * get special character used for separating upper and
200 * lower case, 0x00 disables this feature
201 */
202 if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg))
203 rc = -EFAULT;
204 break;
205 case TIOCSCLPSINIT:
206 /* set initial (default) sclp ioctls */
207 sclp_ioctls = sclp_ioctls_init;
208 check = 1;
209 break;
210 default:
211 rc = -ENOIOCTLCMD;
212 break;
213 }
214 if (check) {
215 spin_lock_irqsave(&sclp_tty_lock, flags);
216 if (sclp_ttybuf != NULL) {
217 sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab);
218 sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns);
219 }
220 spin_unlock_irqrestore(&sclp_tty_lock, flags);
221 }
222 return rc;
223}
224
225/* 82/*
226 * This routine returns the number of characters the tty driver 83 * This routine returns the number of characters the tty driver
227 * will accept for queuing to be written. This number is subject 84 * will accept for queuing to be written. This number is subject
@@ -268,7 +125,6 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
268 struct sclp_buffer, list); 125 struct sclp_buffer, list);
269 spin_unlock_irqrestore(&sclp_tty_lock, flags); 126 spin_unlock_irqrestore(&sclp_tty_lock, flags);
270 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); 127 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
271 wake_up(&sclp_tty_waitq);
272 /* check if the tty needs a wake up call */ 128 /* check if the tty needs a wake up call */
273 if (sclp_tty != NULL) { 129 if (sclp_tty != NULL) {
274 tty_wakeup(sclp_tty); 130 tty_wakeup(sclp_tty);
@@ -316,37 +172,37 @@ sclp_tty_timeout(unsigned long data)
316/* 172/*
317 * Write a string to the sclp tty. 173 * Write a string to the sclp tty.
318 */ 174 */
319static void 175static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
320sclp_tty_write_string(const unsigned char *str, int count)
321{ 176{
322 unsigned long flags; 177 unsigned long flags;
323 void *page; 178 void *page;
324 int written; 179 int written;
180 int overall_written;
325 struct sclp_buffer *buf; 181 struct sclp_buffer *buf;
326 182
327 if (count <= 0) 183 if (count <= 0)
328 return; 184 return 0;
185 overall_written = 0;
329 spin_lock_irqsave(&sclp_tty_lock, flags); 186 spin_lock_irqsave(&sclp_tty_lock, flags);
330 do { 187 do {
331 /* Create a sclp output buffer if none exists yet */ 188 /* Create a sclp output buffer if none exists yet */
332 if (sclp_ttybuf == NULL) { 189 if (sclp_ttybuf == NULL) {
333 while (list_empty(&sclp_tty_pages)) { 190 while (list_empty(&sclp_tty_pages)) {
334 spin_unlock_irqrestore(&sclp_tty_lock, flags); 191 spin_unlock_irqrestore(&sclp_tty_lock, flags);
335 if (in_interrupt()) 192 if (may_fail)
336 sclp_sync_wait(); 193 goto out;
337 else 194 else
338 wait_event(sclp_tty_waitq, 195 sclp_sync_wait();
339 !list_empty(&sclp_tty_pages));
340 spin_lock_irqsave(&sclp_tty_lock, flags); 196 spin_lock_irqsave(&sclp_tty_lock, flags);
341 } 197 }
342 page = sclp_tty_pages.next; 198 page = sclp_tty_pages.next;
343 list_del((struct list_head *) page); 199 list_del((struct list_head *) page);
344 sclp_ttybuf = sclp_make_buffer(page, 200 sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
345 sclp_ioctls.columns, 201 SPACES_PER_TAB);
346 sclp_ioctls.htab);
347 } 202 }
348 /* try to write the string to the current output buffer */ 203 /* try to write the string to the current output buffer */
349 written = sclp_write(sclp_ttybuf, str, count); 204 written = sclp_write(sclp_ttybuf, str, count);
205 overall_written += written;
350 if (written == count) 206 if (written == count)
351 break; 207 break;
352 /* 208 /*
@@ -363,27 +219,17 @@ sclp_tty_write_string(const unsigned char *str, int count)
363 count -= written; 219 count -= written;
364 } while (count > 0); 220 } while (count > 0);
365 /* Setup timer to output current console buffer after 1/10 second */ 221 /* Setup timer to output current console buffer after 1/10 second */
366 if (sclp_ioctls.final_nl) { 222 if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
367 if (sclp_ttybuf != NULL && 223 !timer_pending(&sclp_tty_timer)) {
368 sclp_chars_in_buffer(sclp_ttybuf) != 0 && 224 init_timer(&sclp_tty_timer);
369 !timer_pending(&sclp_tty_timer)) { 225 sclp_tty_timer.function = sclp_tty_timeout;
370 init_timer(&sclp_tty_timer); 226 sclp_tty_timer.data = 0UL;
371 sclp_tty_timer.function = sclp_tty_timeout; 227 sclp_tty_timer.expires = jiffies + HZ/10;
372 sclp_tty_timer.data = 0UL; 228 add_timer(&sclp_tty_timer);
373 sclp_tty_timer.expires = jiffies + HZ/10;
374 add_timer(&sclp_tty_timer);
375 }
376 } else {
377 if (sclp_ttybuf != NULL &&
378 sclp_chars_in_buffer(sclp_ttybuf) != 0) {
379 buf = sclp_ttybuf;
380 sclp_ttybuf = NULL;
381 spin_unlock_irqrestore(&sclp_tty_lock, flags);
382 __sclp_ttybuf_emit(buf);
383 spin_lock_irqsave(&sclp_tty_lock, flags);
384 }
385 } 229 }
386 spin_unlock_irqrestore(&sclp_tty_lock, flags); 230 spin_unlock_irqrestore(&sclp_tty_lock, flags);
231out:
232 return overall_written;
387} 233}
388 234
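
The new may_fail parameter gives sclp_tty_write_string() two contracts: callers that are allowed to block wait for a free page via sclp_sync_wait(), while the tty write path takes what fits and reports a short count. A self-contained userspace sketch of that contract, with a fixed array standing in for the page pool:

    #include <string.h>

    static char pool[64];
    static int used;

    /* returns how many bytes were accepted; may be short if may_fail */
    static int pool_write(const char *s, int count, int may_fail)
    {
            int overall = 0;

            while (count > 0) {
                    int room = sizeof(pool) - used;

                    if (room == 0) {
                            if (may_fail)
                                    break;  /* short write; caller retries */
                            used = 0;       /* stand-in for waiting on a flush */
                            continue;
                    }
                    if (room > count)
                            room = count;
                    memcpy(pool + used, s, room);
                    used += room;
                    s += room;
                    count -= room;
                    overall += room;
            }
            return overall;
    }
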
389/* 235/*
@@ -395,11 +241,10 @@ static int
395sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) 241sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
396{ 242{
397 if (sclp_tty_chars_count > 0) { 243 if (sclp_tty_chars_count > 0) {
398 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 244 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
399 sclp_tty_chars_count = 0; 245 sclp_tty_chars_count = 0;
400 } 246 }
401 sclp_tty_write_string(buf, count); 247 return sclp_tty_write_string(buf, count, 1);
402 return count;
403} 248}
404 249
405/* 250/*
@@ -417,9 +262,10 @@ sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
417{ 262{
418 sclp_tty_chars[sclp_tty_chars_count++] = ch; 263 sclp_tty_chars[sclp_tty_chars_count++] = ch;
419 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { 264 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
420 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 265 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
421 sclp_tty_chars_count = 0; 266 sclp_tty_chars_count = 0;
422 } return 1; 267 }
268 return 1;
423} 269}
424 270
425/* 271/*
@@ -430,7 +276,7 @@ static void
430sclp_tty_flush_chars(struct tty_struct *tty) 276sclp_tty_flush_chars(struct tty_struct *tty)
431{ 277{
432 if (sclp_tty_chars_count > 0) { 278 if (sclp_tty_chars_count > 0) {
433 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 279 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
434 sclp_tty_chars_count = 0; 280 sclp_tty_chars_count = 0;
435 } 281 }
436} 282}
@@ -469,7 +315,7 @@ static void
469sclp_tty_flush_buffer(struct tty_struct *tty) 315sclp_tty_flush_buffer(struct tty_struct *tty)
470{ 316{
471 if (sclp_tty_chars_count > 0) { 317 if (sclp_tty_chars_count > 0) {
472 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 318 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
473 sclp_tty_chars_count = 0; 319 sclp_tty_chars_count = 0;
474 } 320 }
475} 321}
@@ -517,9 +363,7 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
517 * modify original string, 363 * modify original string,
518 * returns length of resulting string 364 * returns length of resulting string
519 */ 365 */
520static int 366static int sclp_switch_cases(unsigned char *buf, int count)
521sclp_switch_cases(unsigned char *buf, int count,
522 unsigned char delim, int tolower)
523{ 367{
524 unsigned char *ip, *op; 368 unsigned char *ip, *op;
525 int toggle; 369 int toggle;
@@ -529,9 +373,9 @@ sclp_switch_cases(unsigned char *buf, int count,
529 ip = op = buf; 373 ip = op = buf;
530 while (count-- > 0) { 374 while (count-- > 0) {
531 /* compare with special character */ 375 /* compare with special character */
532 if (*ip == delim) { 376 if (*ip == CASE_DELIMITER) {
533 /* followed by another special character? */ 377 /* followed by another special character? */
534 if (count && ip[1] == delim) { 378 if (count && ip[1] == CASE_DELIMITER) {
535 /* 379 /*
536 * ... then put a single copy of the special 380 * ... then put a single copy of the special
537 * character to the output string 381 * character to the output string
@@ -550,7 +394,7 @@ sclp_switch_cases(unsigned char *buf, int count,
550 /* not the special character */ 394 /* not the special character */
551 if (toggle) 395 if (toggle)
552 /* but case switching is on */ 396 /* but case switching is on */
553 if (tolower) 397 if (sclp_tty_tolower)
554 /* switch to uppercase */ 398 /* switch to uppercase */
555 *op++ = _ebc_toupper[(int) *ip++]; 399 *op++ = _ebc_toupper[(int) *ip++];
556 else 400 else
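
These hunks hard-wire the formerly ioctl-selected delimiter and case direction into sclp_switch_cases(). The underlying algorithm: a single delimiter toggles case conversion on or off, and a doubled delimiter emits one literal delimiter. A self-contained ASCII sketch of the same logic (the driver itself works on EBCDIC):

    #include <ctype.h>

    #define DELIM '%'

    /* rewrites buf in place, returns the new length */
    static int switch_cases(unsigned char *buf, int count)
    {
            unsigned char *ip = buf, *op = buf;
            int toggle = 0;

            while (count-- > 0) {
                    if (*ip == DELIM) {
                            if (count && ip[1] == DELIM) {
                                    *op++ = *ip;        /* doubled: keep one literal */
                                    ip += 2;
                                    count--;
                            } else {
                                    toggle = !toggle;   /* single: flip case mode */
                                    ip++;
                            }
                            continue;
                    }
                    *op++ = toggle ? toupper(*ip) : *ip;
                    ip++;
            }
            return op - buf;
    }
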
@@ -570,30 +414,12 @@ sclp_get_input(unsigned char *start, unsigned char *end)
570 int count; 414 int count;
571 415
572 count = end - start; 416 count = end - start;
573 /* 417 if (sclp_tty_tolower)
574 * if set in ioctl convert EBCDIC to lower case
575 * (modify original input in SCCB)
576 */
577 if (sclp_ioctls.tolower)
578 EBC_TOLOWER(start, count); 418 EBC_TOLOWER(start, count);
579 419 count = sclp_switch_cases(start, count);
580 /*
581 * if set in ioctl find out characters in lower or upper case
582 * (depends on current case) separated by a special character,
583 * works on EBCDIC
584 */
585 if (sclp_ioctls.delim)
586 count = sclp_switch_cases(start, count,
587 sclp_ioctls.delim,
588 sclp_ioctls.tolower);
589
590 /* convert EBCDIC to ASCII (modify original input in SCCB) */ 420 /* convert EBCDIC to ASCII (modify original input in SCCB) */
591 sclp_ebcasc_str(start, count); 421 sclp_ebcasc_str(start, count);
592 422
593 /* if set in ioctl write operators input to console */
594 if (sclp_ioctls.echo)
595 sclp_tty_write(sclp_tty, start, count);
596
597 /* transfer input to high level driver */ 423 /* transfer input to high level driver */
598 sclp_tty_input(start, count); 424 sclp_tty_input(start, count);
599} 425}
@@ -717,7 +543,6 @@ static const struct tty_operations sclp_ops = {
717 .write_room = sclp_tty_write_room, 543 .write_room = sclp_tty_write_room,
718 .chars_in_buffer = sclp_tty_chars_in_buffer, 544 .chars_in_buffer = sclp_tty_chars_in_buffer,
719 .flush_buffer = sclp_tty_flush_buffer, 545 .flush_buffer = sclp_tty_flush_buffer,
720 .ioctl = sclp_tty_ioctl,
721}; 546};
722 547
723static int __init 548static int __init
@@ -736,9 +561,6 @@ sclp_tty_init(void)
736 561
737 rc = sclp_rw_init(); 562 rc = sclp_rw_init();
738 if (rc) { 563 if (rc) {
739 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
740 "could not register tty - "
741 "sclp_rw_init returned %d\n", rc);
742 put_tty_driver(driver); 564 put_tty_driver(driver);
743 return rc; 565 return rc;
744 } 566 }
@@ -754,7 +576,6 @@ sclp_tty_init(void)
754 } 576 }
755 INIT_LIST_HEAD(&sclp_tty_outqueue); 577 INIT_LIST_HEAD(&sclp_tty_outqueue);
756 spin_lock_init(&sclp_tty_lock); 578 spin_lock_init(&sclp_tty_lock);
757 init_waitqueue_head(&sclp_tty_waitq);
758 init_timer(&sclp_tty_timer); 579 init_timer(&sclp_tty_timer);
759 sclp_ttybuf = NULL; 580 sclp_ttybuf = NULL;
760 sclp_tty_buffer_count = 0; 581 sclp_tty_buffer_count = 0;
@@ -763,11 +584,10 @@ sclp_tty_init(void)
763 * save 4 characters for the CPU number 584 * save 4 characters for the CPU number
764 * written at start of each line by VM/CP 585 * written at start of each line by VM/CP
765 */ 586 */
766 sclp_ioctls_init.columns = 76; 587 sclp_tty_columns = 76;
767 /* case input lines to lowercase */ 588 /* case input lines to lowercase */
768 sclp_ioctls_init.tolower = 1; 589 sclp_tty_tolower = 1;
769 } 590 }
770 sclp_ioctls = sclp_ioctls_init;
771 sclp_tty_chars_count = 0; 591 sclp_tty_chars_count = 0;
772 sclp_tty = NULL; 592 sclp_tty = NULL;
773 593
@@ -792,9 +612,6 @@ sclp_tty_init(void)
792 tty_set_operations(driver, &sclp_ops); 612 tty_set_operations(driver, &sclp_ops);
793 rc = tty_register_driver(driver); 613 rc = tty_register_driver(driver);
794 if (rc) { 614 if (rc) {
795 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
796 "could not register tty - "
797 "tty_register_driver returned %d\n", rc);
798 put_tty_driver(driver); 615 put_tty_driver(driver);
799 return rc; 616 return rc;
800 } 617 }
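
With the final_nl branch removed, the delayed flush above is now unconditional: if the current buffer holds characters and no timer is pending, a one-shot timer emits it after 1/10 second. A sketch of the arm-once pattern, assuming the init_timer()/add_timer() API of this kernel generation:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static struct timer_list flush_timer;

    static void flush_timeout(unsigned long data)
    {
            /* emit the buffered output here */
    }

    static void arm_flush_timer(void)
    {
            if (timer_pending(&flush_timer))
                    return;                 /* one-shot already armed */
            init_timer(&flush_timer);
            flush_timer.function = flush_timeout;
            flush_timer.data = 0UL;
            flush_timer.expires = jiffies + HZ / 10;
            add_timer(&flush_timer);
    }
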
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
index 0ce2c1fc5340..4b965b22fecd 100644
--- a/drivers/s390/char/sclp_tty.h
+++ b/drivers/s390/char/sclp_tty.h
@@ -11,61 +11,8 @@
11#ifndef __SCLP_TTY_H__ 11#ifndef __SCLP_TTY_H__
12#define __SCLP_TTY_H__ 12#define __SCLP_TTY_H__
13 13
14#include <linux/ioctl.h>
15#include <linux/termios.h>
16#include <linux/tty_driver.h> 14#include <linux/tty_driver.h>
17 15
18/* This is the type of data structures storing sclp ioctl setting. */
19struct sclp_ioctls {
20 unsigned short htab;
21 unsigned char echo;
22 unsigned short columns;
23 unsigned char final_nl;
24 unsigned short max_sccb;
25 unsigned short kmem_sccb; /* can't be modified at run time */
26 unsigned char tolower;
27 unsigned char delim;
28};
29
30/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
31#define SCLP_IOCTL_LETTER 'B'
32
33/* set width of horizontal tabulator */
34#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
35/* enable/disable echo of input (independent from line discipline) */
36#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
37/* set number of colums for output */
38#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
39/* enable/disable writing without final new line character */
40#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char)
41/* set the maximum buffers size for output, rounded up to next 4kB boundary */
42#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
43/* set initial (default) sclp ioctls */
44#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6)
45/* enable/disable conversion from upper to lower case of input */
46#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
47/* set special character used for separating upper and lower case, */
48/* 0x00 disables this feature */
49#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char)
50
51/* get width of horizontal tabulator */
52#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
53/* Is echo of input enabled ? (independent from line discipline) */
54#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
55/* get number of colums for output */
56#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
57/* Is writing without final new line character enabled ? */
58#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char)
59/* get the maximum buffers size for output */
60#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
61/* Is conversion from upper to lower case of input enabled ? */
62#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
63/* get special character used for separating upper and lower case, */
64/* 0x00 disables this feature */
65#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
66/* get the number of buffers/pages got from kernel at startup */
67#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short)
68
69extern struct tty_driver *sclp_tty_driver; 16extern struct tty_driver *sclp_tty_driver;
70 17
71#endif /* __SCLP_TTY_H__ */ 18#endif /* __SCLP_TTY_H__ */
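
For reference, the removed TIOCSCLP* numbers were built with the standard ioctl encoding macros: _IOW()/_IOR() pack a transfer direction, the magic letter, a command index and the argument size into one request code. A short illustrative sketch (the DEMO_* names are hypothetical):

    #include <linux/ioctl.h>

    #define DEMO_IOCTL_LETTER 'B'
    /* user space writes an unsigned short into the driver */
    #define DEMO_SET_COLS   _IOW(DEMO_IOCTL_LETTER, 2, unsigned short)
    /* user space reads an unsigned short back out */
    #define DEMO_GET_COLS   _IOR(DEMO_IOCTL_LETTER, 12, unsigned short)
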
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 3e577f655b18..ad51738c4261 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -27,7 +27,6 @@
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28#include "sclp.h" 28#include "sclp.h"
29 29
30#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
31#define SCLP_VT220_MAJOR TTY_MAJOR 30#define SCLP_VT220_MAJOR TTY_MAJOR
32#define SCLP_VT220_MINOR 65 31#define SCLP_VT220_MINOR 65
33#define SCLP_VT220_DRIVER_NAME "sclp_vt220" 32#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
@@ -82,8 +81,8 @@ static struct sclp_vt220_request *sclp_vt220_current_request;
82/* Number of characters in current request buffer */ 81/* Number of characters in current request buffer */
83static int sclp_vt220_buffered_chars; 82static int sclp_vt220_buffered_chars;
84 83
85/* Flag indicating whether this driver has already been initialized */ 84/* Counter controlling core driver initialization. */
86static int sclp_vt220_initialized = 0; 85static int __initdata sclp_vt220_init_count;
87 86
88/* Flag indicating that sclp_vt220_current_request should really 87/* Flag indicating that sclp_vt220_current_request should really
89 * have been already queued but wasn't because the SCLP was processing 88 * have been already queued but wasn't because the SCLP was processing
@@ -609,10 +608,8 @@ sclp_vt220_flush_buffer(struct tty_struct *tty)
609 sclp_vt220_emit_current(); 608 sclp_vt220_emit_current();
610} 609}
611 610
612/* 611/* Release allocated pages. */
613 * Initialize all relevant components and register driver with system. 612static void __init __sclp_vt220_free_pages(void)
614 */
615static void __init __sclp_vt220_cleanup(void)
616{ 613{
617 struct list_head *page, *p; 614 struct list_head *page, *p;
618 615
@@ -623,21 +620,30 @@ static void __init __sclp_vt220_cleanup(void)
623 else 620 else
624 free_bootmem((unsigned long) page, PAGE_SIZE); 621 free_bootmem((unsigned long) page, PAGE_SIZE);
625 } 622 }
626 if (!list_empty(&sclp_vt220_register.list))
627 sclp_unregister(&sclp_vt220_register);
628 sclp_vt220_initialized = 0;
629} 623}
630 624
631static int __init __sclp_vt220_init(void) 625/* Release memory and unregister from sclp core. Controlled by init counting -
626 * only the last invoker will actually perform these actions. */
627static void __init __sclp_vt220_cleanup(void)
628{
629 sclp_vt220_init_count--;
630 if (sclp_vt220_init_count != 0)
631 return;
632 sclp_unregister(&sclp_vt220_register);
633 __sclp_vt220_free_pages();
634}
635
636/* Allocate buffer pages and register with sclp core. Controlled by init
637 * counting - only the first invoker will actually perform these actions. */
638static int __init __sclp_vt220_init(int num_pages)
632{ 639{
633 void *page; 640 void *page;
634 int i; 641 int i;
635 int num_pages;
636 int rc; 642 int rc;
637 643
638 if (sclp_vt220_initialized) 644 sclp_vt220_init_count++;
645 if (sclp_vt220_init_count != 1)
639 return 0; 646 return 0;
640 sclp_vt220_initialized = 1;
641 spin_lock_init(&sclp_vt220_lock); 647 spin_lock_init(&sclp_vt220_lock);
642 INIT_LIST_HEAD(&sclp_vt220_empty); 648 INIT_LIST_HEAD(&sclp_vt220_empty);
643 INIT_LIST_HEAD(&sclp_vt220_outqueue); 649 INIT_LIST_HEAD(&sclp_vt220_outqueue);
@@ -649,24 +655,22 @@ static int __init __sclp_vt220_init(void)
649 sclp_vt220_flush_later = 0; 655 sclp_vt220_flush_later = 0;
650 656
651 /* Allocate pages for output buffering */ 657 /* Allocate pages for output buffering */
652 num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES;
653 for (i = 0; i < num_pages; i++) { 658 for (i = 0; i < num_pages; i++) {
654 if (slab_is_available()) 659 if (slab_is_available())
655 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 660 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
656 else 661 else
657 page = alloc_bootmem_low_pages(PAGE_SIZE); 662 page = alloc_bootmem_low_pages(PAGE_SIZE);
658 if (!page) { 663 if (!page) {
659 __sclp_vt220_cleanup(); 664 rc = -ENOMEM;
660 return -ENOMEM; 665 goto out;
661 } 666 }
662 list_add_tail((struct list_head *) page, &sclp_vt220_empty); 667 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
663 } 668 }
664 rc = sclp_register(&sclp_vt220_register); 669 rc = sclp_register(&sclp_vt220_register);
670out:
665 if (rc) { 671 if (rc) {
666 printk(KERN_ERR SCLP_VT220_PRINT_HEADER 672 __sclp_vt220_free_pages();
667 "could not register vt220 - " 673 sclp_vt220_init_count--;
668 "sclp_register returned %d\n", rc);
669 __sclp_vt220_cleanup();
670 } 674 }
671 return rc; 675 return rc;
672} 676}
@@ -689,15 +693,13 @@ static int __init sclp_vt220_tty_init(void)
689{ 693{
690 struct tty_driver *driver; 694 struct tty_driver *driver;
691 int rc; 695 int rc;
692 int cleanup;
693 696
694 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve 697 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
695 * symmetry between VM and LPAR systems regarding ttyS1. */ 698 * symmetry between VM and LPAR systems regarding ttyS1. */
696 driver = alloc_tty_driver(1); 699 driver = alloc_tty_driver(1);
697 if (!driver) 700 if (!driver)
698 return -ENOMEM; 701 return -ENOMEM;
699 cleanup = !sclp_vt220_initialized; 702 rc = __sclp_vt220_init(MAX_KMEM_PAGES);
700 rc = __sclp_vt220_init();
701 if (rc) 703 if (rc)
702 goto out_driver; 704 goto out_driver;
703 705
@@ -713,18 +715,13 @@ static int __init sclp_vt220_tty_init(void)
713 tty_set_operations(driver, &sclp_vt220_ops); 715 tty_set_operations(driver, &sclp_vt220_ops);
714 716
715 rc = tty_register_driver(driver); 717 rc = tty_register_driver(driver);
716 if (rc) { 718 if (rc)
717 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
718 "could not register tty - "
719 "tty_register_driver returned %d\n", rc);
720 goto out_init; 719 goto out_init;
721 }
722 sclp_vt220_driver = driver; 720 sclp_vt220_driver = driver;
723 return 0; 721 return 0;
724 722
725out_init: 723out_init:
726 if (cleanup) 724 __sclp_vt220_cleanup();
727 __sclp_vt220_cleanup();
728out_driver: 725out_driver:
729 put_tty_driver(driver); 726 put_tty_driver(driver);
730 return rc; 727 return rc;
@@ -773,10 +770,9 @@ sclp_vt220_con_init(void)
773{ 770{
774 int rc; 771 int rc;
775 772
776 INIT_LIST_HEAD(&sclp_vt220_register.list);
777 if (!CONSOLE_IS_SCLP) 773 if (!CONSOLE_IS_SCLP)
778 return 0; 774 return 0;
779 rc = __sclp_vt220_init(); 775 rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
780 if (rc) 776 if (rc)
781 return rc; 777 return rc;
782 /* Attach linux console */ 778 /* Attach linux console */
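
The init counter replaces the old boolean flag because two early entry points, sclp_vt220_con_init() and sclp_vt220_tty_init(), share one buffer pool: only the first caller allocates and registers, and only the last caller cleans up. A minimal sketch of the scheme (the stubs stand in for the real allocation and registration):

    static int init_count;

    static int allocate_and_register(void) { return 0; }   /* stub */
    static void unregister_and_free(void) { }              /* stub */

    static int shared_init(void)
    {
            int rc;

            init_count++;
            if (init_count != 1)
                    return 0;               /* someone already set us up */
            rc = allocate_and_register();
            if (rc)
                    init_count--;           /* roll back on failure */
            return rc;
    }

    static void shared_cleanup(void)
    {
            init_count--;
            if (init_count != 0)
                    return;                 /* another user remains */
            unregister_and_free();
    }
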
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 874adf365e46..22ca34361ed7 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -196,7 +196,7 @@ tape_34xx_erp_retry(struct tape_request *request)
196static int 196static int
197tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) 197tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
198{ 198{
199 if (irb->scsw.dstat == 0x85 /* READY */) { 199 if (irb->scsw.cmd.dstat == 0x85) { /* READY */
200 /* A medium was inserted in the drive. */ 200 /* A medium was inserted in the drive. */
201 DBF_EVENT(6, "xuud med\n"); 201 DBF_EVENT(6, "xuud med\n");
202 tape_34xx_delete_sbid_from(device, 0); 202 tape_34xx_delete_sbid_from(device, 0);
@@ -844,22 +844,22 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request,
844 if (request == NULL) 844 if (request == NULL)
845 return tape_34xx_unsolicited_irq(device, irb); 845 return tape_34xx_unsolicited_irq(device, irb);
846 846
847 if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && 847 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
848 (irb->scsw.dstat & DEV_STAT_DEV_END) && 848 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
849 (request->op == TO_WRI)) { 849 (request->op == TO_WRI)) {
850 /* Write at end of volume */ 850 /* Write at end of volume */
851 PRINT_INFO("End of volume\n"); /* XXX */ 851 PRINT_INFO("End of volume\n"); /* XXX */
852 return tape_34xx_erp_failed(request, -ENOSPC); 852 return tape_34xx_erp_failed(request, -ENOSPC);
853 } 853 }
854 854
855 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 855 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
856 return tape_34xx_unit_check(device, request, irb); 856 return tape_34xx_unit_check(device, request, irb);
857 857
858 if (irb->scsw.dstat & DEV_STAT_DEV_END) { 858 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
859 /* 859 /*
860 * A unit exception occurs on skipping over a tapemark block. 860 * A unit exception occurs on skipping over a tapemark block.
861 */ 861 */
862 if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { 862 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
863 if (request->op == TO_BSB || request->op == TO_FSB) 863 if (request->op == TO_BSB || request->op == TO_FSB)
864 request->rescnt++; 864 request->rescnt++;
865 else 865 else
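
The s/irb->scsw.dstat/irb->scsw.cmd.dstat/ churn in this and the following tape, tty3270 and vmur hunks stems from struct irb's subchannel status word becoming a union with command-mode and transport-mode layouts. A simplified illustration of the access pattern (the real struct scsw has many more fields):

    union demo_scsw {
            struct {
                    unsigned char dstat;    /* device status */
                    unsigned char cstat;    /* subchannel status */
            } cmd;                          /* command-mode view */
            struct {
                    unsigned char dstat;
                    unsigned char fcxs;     /* transport-mode extras */
            } tm;
    };

    static int device_ready(const union demo_scsw *scsw)
    {
            return scsw->cmd.dstat == 0x85; /* READY, as tested above */
    }
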
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 42ce7915fc5d..839987618ffd 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -837,13 +837,13 @@ tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
837static int 837static int
838tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) 838tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
839{ 839{
840 if (irb->scsw.dstat == DEV_STAT_CHN_END) 840 if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
841 /* Probably result of halt ssch */ 841 /* Probably result of halt ssch */
842 return TAPE_IO_PENDING; 842 return TAPE_IO_PENDING;
843 else if (irb->scsw.dstat == 0x85) 843 else if (irb->scsw.cmd.dstat == 0x85)
844 /* Device Ready */ 844 /* Device Ready */
845 DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); 845 DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
846 else if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 846 else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
847 tape_3590_schedule_work(device, TO_READ_ATTMSG); 847 tape_3590_schedule_work(device, TO_READ_ATTMSG);
848 } else { 848 } else {
849 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); 849 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
@@ -1515,18 +1515,19 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
1515 if (request == NULL) 1515 if (request == NULL)
1516 return tape_3590_unsolicited_irq(device, irb); 1516 return tape_3590_unsolicited_irq(device, irb);
1517 1517
1518 if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && 1518 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
1519 (irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { 1519 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
1520 (request->op == TO_WRI)) {
1520 /* Write at end of volume */ 1521 /* Write at end of volume */
1521 DBF_EVENT(2, "End of volume\n"); 1522 DBF_EVENT(2, "End of volume\n");
1522 return tape_3590_erp_failed(device, request, irb, -ENOSPC); 1523 return tape_3590_erp_failed(device, request, irb, -ENOSPC);
1523 } 1524 }
1524 1525
1525 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 1526 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
1526 return tape_3590_unit_check(device, request, irb); 1527 return tape_3590_unit_check(device, request, irb);
1527 1528
1528 if (irb->scsw.dstat & DEV_STAT_DEV_END) { 1529 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
1529 if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) { 1530 if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
1530 if (request->op == TO_FSB || request->op == TO_BSB) 1531 if (request->op == TO_FSB || request->op == TO_BSB)
1531 request->rescnt++; 1532 request->rescnt++;
1532 else 1533 else
@@ -1536,12 +1537,12 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
1536 return tape_3590_done(device, request); 1537 return tape_3590_done(device, request);
1537 } 1538 }
1538 1539
1539 if (irb->scsw.dstat & DEV_STAT_CHN_END) { 1540 if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
1540 DBF_EVENT(2, "channel end\n"); 1541 DBF_EVENT(2, "channel end\n");
1541 return TAPE_IO_PENDING; 1542 return TAPE_IO_PENDING;
1542 } 1543 }
1543 1544
1544 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 1545 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
1545 DBF_EVENT(2, "Unit Attention when busy..\n"); 1546 DBF_EVENT(2, "Unit Attention when busy..\n");
1546 return TAPE_IO_PENDING; 1547 return TAPE_IO_PENDING;
1547 } 1548 }
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index c20e3c548343..181a5441af16 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -839,7 +839,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request,
839 839
840 PRINT_INFO("-------------------------------------------------\n"); 840 PRINT_INFO("-------------------------------------------------\n");
841 PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", 841 PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
842 irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa); 842 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa);
843 PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id); 843 PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
844 if (request != NULL) 844 if (request != NULL)
845 PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); 845 PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
@@ -867,7 +867,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
867 else 867 else
868 op = "---"; 868 op = "---";
869 DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", 869 DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
870 irb->scsw.dstat,irb->scsw.cstat); 870 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
871 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); 871 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
872 sptr = (unsigned int *) irb->ecw; 872 sptr = (unsigned int *) irb->ecw;
873 DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); 873 DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
@@ -1083,10 +1083,11 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1083 * error might still apply. So we just schedule the request to be 1083 * error might still apply. So we just schedule the request to be
1084 * started later. 1084 * started later.
1085 */ 1085 */
1086 if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && 1086 if (irb->scsw.cmd.cc != 0 &&
1087 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
1087 (request->status == TAPE_REQUEST_IN_IO)) { 1088 (request->status == TAPE_REQUEST_IN_IO)) {
1088 DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", 1089 DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
1089 device->cdev_id, irb->scsw.cc, irb->scsw.fctl); 1090 device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
1090 request->status = TAPE_REQUEST_QUEUED; 1091 request->status = TAPE_REQUEST_QUEUED;
1091 schedule_delayed_work(&device->tape_dnr, HZ); 1092 schedule_delayed_work(&device->tape_dnr, HZ);
1092 return; 1093 return;
@@ -1094,8 +1095,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1094 1095
1095 /* May be an unsolicited irq */ 1096 /* May be an unsolicited irq */
1096 if(request != NULL) 1097 if(request != NULL)
1097 request->rescnt = irb->scsw.count; 1098 request->rescnt = irb->scsw.cmd.count;
1098 else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && 1099 else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
1099 !list_empty(&device->req_queue)) { 1100 !list_empty(&device->req_queue)) {
1100 /* Not Ready to Ready after long busy ? */ 1101 /* Not Ready to Ready after long busy ? */
1101 struct tape_request *req; 1102 struct tape_request *req;
@@ -1111,7 +1112,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1111 return; 1112 return;
1112 } 1113 }
1113 } 1114 }
1114 if (irb->scsw.dstat != 0x0c) { 1115 if (irb->scsw.cmd.dstat != 0x0c) {
1115 /* Set the 'ONLINE' flag depending on sense byte 1 */ 1116 /* Set the 'ONLINE' flag depending on sense byte 1 */
1116 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) 1117 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
1117 device->tape_generic_status |= GMT_ONLINE(~0); 1118 device->tape_generic_status |= GMT_ONLINE(~0);
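
The rewrapped condition above implements deferred-condition-code handling: a started request that comes back with cc != 0 is not failed but re-queued and restarted a second later. A sketch of that retry pattern (types and names are illustrative, not the tape core's):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    #define DEMO_FCTL_START 0x40            /* stand-in for SCSW_FCTL_START_FUNC */
    enum { REQ_QUEUED, REQ_IN_IO };

    struct demo_req { int status; };
    struct demo_dev { struct delayed_work restart_work; };

    static void handle_deferred_cc(struct demo_dev *dev,
                                   struct demo_req *req, int cc, int fctl)
    {
            if (cc != 0 && (fctl & DEMO_FCTL_START) &&
                req->status == REQ_IN_IO) {
                    req->status = REQ_QUEUED;       /* retry instead of fail */
                    schedule_delayed_work(&dev->restart_work, HZ);
            }
    }
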
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 5043150019ac..a7fe6302c982 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -663,7 +663,7 @@ static int
663tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) 663tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
664{ 664{
665 /* Handle ATTN. Schedule tasklet to read aid. */ 665 /* Handle ATTN. Schedule tasklet to read aid. */
666 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 666 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
667 if (!tp->throttle) 667 if (!tp->throttle)
668 tty3270_issue_read(tp, 0); 668 tty3270_issue_read(tp, 0);
669 else 669 else
@@ -671,11 +671,11 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
671 } 671 }
672 672
673 if (rq) { 673 if (rq) {
674 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 674 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
675 rq->rc = -EIO; 675 rq->rc = -EIO;
676 else 676 else
677 /* Normal end. Copy residual count. */ 677 /* Normal end. Copy residual count. */
678 rq->rescnt = irb->scsw.count; 678 rq->rescnt = irb->scsw.cmd.count;
679 } 679 }
680 return RAW3270_IO_DONE; 680 return RAW3270_IO_DONE;
681} 681}
@@ -1792,15 +1792,12 @@ static int __init tty3270_init(void)
1792 tty_set_operations(driver, &tty3270_ops); 1792 tty_set_operations(driver, &tty3270_ops);
1793 ret = tty_register_driver(driver); 1793 ret = tty_register_driver(driver);
1794 if (ret) { 1794 if (ret) {
1795 printk(KERN_ERR "tty3270 registration failed with %d\n", ret);
1796 put_tty_driver(driver); 1795 put_tty_driver(driver);
1797 return ret; 1796 return ret;
1798 } 1797 }
1799 tty3270_driver = driver; 1798 tty3270_driver = driver;
1800 ret = raw3270_register_notifier(tty3270_notifier); 1799 ret = raw3270_register_notifier(tty3270_notifier);
1801 if (ret) { 1800 if (ret) {
1802 printk(KERN_ERR "tty3270 notifier registration failed "
1803 "with %d\n", ret);
1804 put_tty_driver(driver); 1801 put_tty_driver(driver);
1805 return ret; 1802 return ret;
1806 1803
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index bf3dc6e0e33a..09e7d9bf438b 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -65,30 +65,24 @@ static int vmcp_release(struct inode *inode, struct file *file)
65static ssize_t 65static ssize_t
66vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) 66vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
67{ 67{
68 size_t tocopy; 68 ssize_t ret;
69 size_t size;
69 struct vmcp_session *session; 70 struct vmcp_session *session;
70 71
71 session = (struct vmcp_session *)file->private_data; 72 session = file->private_data;
72 if (mutex_lock_interruptible(&session->mutex)) 73 if (mutex_lock_interruptible(&session->mutex))
73 return -ERESTARTSYS; 74 return -ERESTARTSYS;
74 if (!session->response) { 75 if (!session->response) {
75 mutex_unlock(&session->mutex); 76 mutex_unlock(&session->mutex);
76 return 0; 77 return 0;
77 } 78 }
78 if (*ppos > session->resp_size) { 79 size = min_t(size_t, session->resp_size, session->bufsize);
79 mutex_unlock(&session->mutex); 80 ret = simple_read_from_buffer(buff, count, ppos,
80 return 0; 81 session->response, size);
81 }
82 tocopy = min(session->resp_size - (size_t) (*ppos), count);
83 tocopy = min(tocopy, session->bufsize - (size_t) (*ppos));
84 82
85 if (copy_to_user(buff, session->response + (*ppos), tocopy)) {
86 mutex_unlock(&session->mutex);
87 return -EFAULT;
88 }
89 mutex_unlock(&session->mutex); 83 mutex_unlock(&session->mutex);
90 *ppos += tocopy; 84
91 return tocopy; 85 return ret;
92} 86}
93 87
94static ssize_t 88static ssize_t
@@ -202,27 +196,23 @@ static int __init vmcp_init(void)
202 PRINT_WARN("z/VM CP interface is only available under z/VM\n"); 196 PRINT_WARN("z/VM CP interface is only available under z/VM\n");
203 return -ENODEV; 197 return -ENODEV;
204 } 198 }
199
205 vmcp_debug = debug_register("vmcp", 1, 1, 240); 200 vmcp_debug = debug_register("vmcp", 1, 1, 240);
206 if (!vmcp_debug) { 201 if (!vmcp_debug)
207 PRINT_ERR("z/VM CP interface not loaded. Could not register "
208 "debug feature\n");
209 return -ENOMEM; 202 return -ENOMEM;
210 } 203
211 ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); 204 ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
212 if (ret) { 205 if (ret) {
213 PRINT_ERR("z/VM CP interface not loaded. Could not register "
214 "debug feature view. Error code: %d\n", ret);
215 debug_unregister(vmcp_debug); 206 debug_unregister(vmcp_debug);
216 return ret; 207 return ret;
217 } 208 }
209
218 ret = misc_register(&vmcp_dev); 210 ret = misc_register(&vmcp_dev);
219 if (ret) { 211 if (ret) {
220 PRINT_ERR("z/VM CP interface not loaded. Could not register "
221 "misc device. Error code: %d\n", ret);
222 debug_unregister(vmcp_debug); 212 debug_unregister(vmcp_debug);
223 return ret; 213 return ret;
224 } 214 }
225 PRINT_INFO("z/VM CP interface loaded\n"); 215
226 return 0; 216 return 0;
227} 217}
228 218
@@ -230,7 +220,6 @@ static void __exit vmcp_exit(void)
230{ 220{
231 misc_deregister(&vmcp_dev); 221 misc_deregister(&vmcp_dev);
232 debug_unregister(vmcp_debug); 222 debug_unregister(vmcp_debug);
233 PRINT_INFO("z/VM CP interface unloaded.\n");
234} 223}
235 224
236module_init(vmcp_init); 225module_init(vmcp_init);
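
The vmcp read path now leans on simple_read_from_buffer(), which bundles the *ppos bounds check, the clamp against the available bytes, copy_to_user() and the *ppos advance into one helper that returns the byte count or -EFAULT. A minimal sketch of a read() built on it:

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    static char resp[4096];
    static size_t resp_len;

    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
            /* copies at most resp_len - *ppos bytes and advances *ppos */
            return simple_read_from_buffer(buf, count, ppos, resp, resp_len);
    }
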
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6f852fdb6d70..c31faefa2b3b 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -217,9 +217,7 @@ static int vmlogrdr_get_recording_class_AB(void)
217 char *tail; 217 char *tail;
218 int len,i; 218 int len,i;
219 219
220 printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command);
221 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 220 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
222 printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response);
223 len = strnlen(cp_response,sizeof(cp_response)); 221 len = strnlen(cp_response,sizeof(cp_response));
224 // now the parsing 222 // now the parsing
225 tail=strnchr(cp_response,len,'='); 223 tail=strnchr(cp_response,len,'=');
@@ -269,11 +267,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
269 logptr->recording_name, 267 logptr->recording_name,
270 qid_string); 268 qid_string);
271 269
272 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n",
273 cp_command);
274 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 270 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
275 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
276 cp_response);
277 } 271 }
278 272
279 memset(cp_command, 0x00, sizeof(cp_command)); 273 memset(cp_command, 0x00, sizeof(cp_command));
@@ -283,10 +277,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
283 onoff, 277 onoff,
284 qid_string); 278 qid_string);
285 279
286 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
287 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 280 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
288 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
289 cp_response);
290 /* The recording command will usually answer with 'Command complete' 281 /* The recording command will usually answer with 'Command complete'
291 * on success, but when the specific service was never connected 282 * on success, but when the specific service was never connected
292 * before then there might be an additional informational message 283 * before then there might be an additional informational message
@@ -573,10 +564,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
573 "RECORDING %s PURGE ", 564 "RECORDING %s PURGE ",
574 priv->recording_name); 565 priv->recording_name);
575 566
576 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
577 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 567 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
578 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
579 cp_response);
580 568
581 return count; 569 return count;
582} 570}
@@ -688,28 +676,20 @@ static int vmlogrdr_register_driver(void)
688 676
689 /* Register with iucv driver */ 677 /* Register with iucv driver */
690 ret = iucv_register(&vmlogrdr_iucv_handler, 1); 678 ret = iucv_register(&vmlogrdr_iucv_handler, 1);
691 if (ret) { 679 if (ret)
692 printk (KERN_ERR "vmlogrdr: failed to register with "
693 "iucv driver\n");
694 goto out; 680 goto out;
695 }
696 681
697 ret = driver_register(&vmlogrdr_driver); 682 ret = driver_register(&vmlogrdr_driver);
698 if (ret) { 683 if (ret)
699 printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
700 goto out_iucv; 684 goto out_iucv;
701 }
702 685
703 ret = driver_create_file(&vmlogrdr_driver, 686 ret = driver_create_file(&vmlogrdr_driver,
704 &driver_attr_recording_status); 687 &driver_attr_recording_status);
705 if (ret) { 688 if (ret)
706 printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
707 goto out_driver; 689 goto out_driver;
708 }
709 690
710 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); 691 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
711 if (IS_ERR(vmlogrdr_class)) { 692 if (IS_ERR(vmlogrdr_class)) {
712 printk(KERN_ERR "vmlogrdr: failed to create class.\n");
713 ret = PTR_ERR(vmlogrdr_class); 693 ret = PTR_ERR(vmlogrdr_class);
714 vmlogrdr_class = NULL; 694 vmlogrdr_class = NULL;
715 goto out_attr; 695 goto out_attr;
@@ -877,12 +857,10 @@ static int __init vmlogrdr_init(void)
877 rc = vmlogrdr_register_cdev(dev); 857 rc = vmlogrdr_register_cdev(dev);
878 if (rc) 858 if (rc)
879 goto cleanup; 859 goto cleanup;
880 printk (KERN_INFO "vmlogrdr: driver loaded\n");
881 return 0; 860 return 0;
882 861
883cleanup: 862cleanup:
884 vmlogrdr_cleanup(); 863 vmlogrdr_cleanup();
885 printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
886 return rc; 864 return rc;
887} 865}
888 866
@@ -890,7 +868,6 @@ cleanup:
890static void __exit vmlogrdr_exit(void) 868static void __exit vmlogrdr_exit(void)
891{ 869{
892 vmlogrdr_cleanup(); 870 vmlogrdr_cleanup();
893 printk (KERN_INFO "vmlogrdr: driver unloaded\n");
894 return; 871 return;
895} 872}
896 873
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 61549987ed70..0a9f1cccbe58 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -278,7 +278,8 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	struct urdev *urd;
 
 	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
-	      intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);
+	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+	      irb->scsw.cmd.count);
 
 	if (!intparm) {
 		TRACE("ur_int_handler: unsolicited interrupt\n");
@@ -289,7 +290,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	/* On special conditions irb is an error pointer */
 	if (IS_ERR(irb))
 		urd->io_request_rc = PTR_ERR(irb);
-	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
 		urd->io_request_rc = 0;
 	else
 		urd->io_request_rc = -EIO;
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index ee80e936d514..21a2a829bf4e 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -93,23 +93,15 @@ static int vmwdt_keepalive(void)
 
 	func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
 	ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
+	WARN_ON(ret != 0);
 	kfree(ebc_cmd);
-
-	if (ret) {
-		printk(KERN_WARNING "%s: problem setting interval %d, "
-			"cmd %s\n", __func__, vmwdt_interval,
-			vmwdt_cmd);
-	}
 	return ret;
 }
 
 static int vmwdt_disable(void)
 {
 	int ret = __diag288(wdt_cancel, 0, "", 0);
-	if (ret) {
-		printk(KERN_WARNING "%s: problem disabling watchdog\n",
-			__func__);
-	}
+	WARN_ON(ret != 0);
 	return ret;
 }
 
@@ -122,10 +114,8 @@ static int __init vmwdt_probe(void)
 	static char __initdata ebc_begin[] = {
 		194, 197, 199, 201, 213
 	};
-	if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) {
-		printk(KERN_INFO "z/VM watchdog not available\n");
+	if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
 		return -EINVAL;
-	}
 	return vmwdt_disable();
 }
 
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index bbbd14e9d48f..047dd92ae804 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -223,12 +223,10 @@ static int __init init_cpu_info(enum arch_id arch)
 	/* get info for boot cpu from lowcore, stored in the HSA */
 
 	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
-	if (!sa) {
-		ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
+	if (!sa)
 		return -ENOMEM;
-	}
 	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
-		ERROR_MSG("could not copy from HSA\n");
+		TRACE("could not copy from HSA\n");
 		kfree(sa);
 		return -EIO;
 	}
@@ -511,6 +509,8 @@ static void __init set_s390x_lc_mask(union save_area *map)
  */
 static int __init sys_info_init(enum arch_id arch)
 {
+	int rc;
+
 	switch (arch) {
 	case ARCH_S390X:
 		MSG("DETECTED 'S390X (64 bit) OS'\n");
@@ -529,10 +529,9 @@ static int __init sys_info_init(enum arch_id arch)
 		return -EINVAL;
 	}
 	sys_info.arch = arch;
-	if (init_cpu_info(arch)) {
-		ERROR_MSG("get cpu info failed\n");
-		return -ENOMEM;
-	}
+	rc = init_cpu_info(arch);
+	if (rc)
+		return rc;
 	sys_info.mem_size = real_memory_size;
 
 	return 0;
@@ -544,12 +543,12 @@ static int __init check_sdias(void)
 
 	rc = sclp_sdias_blk_count();
 	if (rc < 0) {
-		ERROR_MSG("Could not determine HSA size\n");
+		TRACE("Could not determine HSA size\n");
 		return rc;
 	}
 	act_hsa_size = (rc - 1) * PAGE_SIZE;
 	if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
-		ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
+		TRACE("HSA size too small: %i\n", act_hsa_size);
 		return -EINVAL;
 	}
 	return 0;
@@ -590,16 +589,12 @@ static int __init zcore_init(void)
 		goto fail;
 
 	rc = check_sdias();
-	if (rc) {
-		ERROR_MSG("Dump initialization failed\n");
+	if (rc)
 		goto fail;
-	}
 
 	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
-	if (rc) {
-		ERROR_MSG("sdial memcpy for arch id failed\n");
+	if (rc)
 		goto fail;
-	}
 
 #ifndef __s390x__
 	if (arch == ARCH_S390X) {
@@ -610,10 +605,8 @@ static int __init zcore_init(void)
 #endif
 
 	rc = sys_info_init(arch);
-	if (rc) {
-		ERROR_MSG("arch init failed\n");
+	if (rc)
 		goto fail;
-	}
 
 	zcore_header_init(arch, &zcore_header);
 
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index cfaf77b320f5..91e9e3f3073a 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,9 +2,11 @@
 # Makefile for the S/390 common i/o drivers
 #
 
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
+	fcx.o itcw.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
 obj-$(CONFIG_CCWGROUP) += ccwgroup.o
 obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index b7a07a866291..fe6cea15bbaf 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 
 #include <asm/airq.h>
+#include <asm/isc.h>
 
 #include "cio.h"
 #include "cio_debug.h"
@@ -33,15 +34,15 @@ struct airq_t {
 	void *drv_data;
 };
 
-static union indicator_t indicators;
-static struct airq_t *airqs[NR_AIRQS];
+static union indicator_t indicators[MAX_ISC];
+static struct airq_t *airqs[MAX_ISC][NR_AIRQS];
 
-static int register_airq(struct airq_t *airq)
+static int register_airq(struct airq_t *airq, u8 isc)
 {
 	int i;
 
 	for (i = 0; i < NR_AIRQS; i++)
-		if (!cmpxchg(&airqs[i], NULL, airq))
+		if (!cmpxchg(&airqs[isc][i], NULL, airq))
 			return i;
 	return -ENOMEM;
 }
@@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq)
  * s390_register_adapter_interrupt() - register adapter interrupt handler
  * @handler: adapter handler to be registered
  * @drv_data: driver data passed with each call to the handler
+ * @isc: isc for which the handler should be called
  *
  * Returns:
  *	Pointer to the indicator to be used on success
  *	ERR_PTR() if registration failed
  */
 void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
-				      void *drv_data)
+				      void *drv_data, u8 isc)
 {
 	struct airq_t *airq;
 	char dbf_txt[16];
 	int ret;
 
+	if (isc > MAX_ISC)
+		return ERR_PTR(-EINVAL);
 	airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
 	if (!airq) {
 		ret = -ENOMEM;
@@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
 	}
 	airq->handler = handler;
 	airq->drv_data = drv_data;
-	ret = register_airq(airq);
-	if (ret < 0)
-		kfree(airq);
+
+	ret = register_airq(airq, isc);
 out:
 	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(airq);
 		return ERR_PTR(ret);
-	else
-		return &indicators.byte[ret];
+	} else
+		return &indicators[isc].byte[ret];
 }
 EXPORT_SYMBOL(s390_register_adapter_interrupt);
 
 /**
  * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
  * @ind: indicator for which the handler is to be unregistered
+ * @isc: interruption subclass
  */
-void s390_unregister_adapter_interrupt(void *ind)
+void s390_unregister_adapter_interrupt(void *ind, u8 isc)
 {
 	struct airq_t *airq;
 	char dbf_txt[16];
 	int i;
 
-	i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]);
+	i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
 	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	indicators.byte[i] = 0;
-	airq = xchg(&airqs[i], NULL);
+	indicators[isc].byte[i] = 0;
+	airq = xchg(&airqs[isc][i], NULL);
 	/*
 	 * Allow interrupts to complete. This will ensure that the airq handle
 	 * is no longer referenced by any interrupt handler.
@@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
 
 #define INDICATOR_MASK	(0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
 
-void do_adapter_IO(void)
+void do_adapter_IO(u8 isc)
 {
 	int w;
 	int i;
@@ -120,22 +125,22 @@ void do_adapter_IO(void)
 	 * fetch operations.
 	 */
 	for (w = 0; w < NR_AIRQ_WORDS; w++) {
-		word = indicators.word[w];
+		word = indicators[isc].word[w];
 		i = w * NR_AIRQS_PER_WORD;
 		/*
 		 * Check bytes within word for active indicators.
 		 */
 		while (word) {
 			if (word & INDICATOR_MASK) {
-				airq = airqs[i];
+				airq = airqs[isc][i];
 				if (likely(airq))
-					airq->handler(&indicators.byte[i],
+					airq->handler(&indicators[isc].byte[i],
 						      airq->drv_data);
 				else
 					/*
 					 * Reset ill-behaved indicator.
 					 */
-					indicators.byte[i] = 0;
+					indicators[isc].byte[i] = 0;
 			}
 			word <<= 8;
 			i++;
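
The kernel-doc above describes the reworked adapter-interrupt API, now keyed by interruption subclass. A minimal, hedged usage sketch follows; the handler signature is inferred from the call site in do_adapter_IO() (indicator pointer plus the registered drv_data), and the my_* names are placeholders, not identifiers from the patch:

/* Hedged sketch, not part of the patch: a driver using the per-ISC API. */
static void my_adapter_handler(void *indicator, void *drv_data)
{
	/* Called from do_adapter_IO() when the indicator byte is set. */
}

static void *my_indicator;

static int my_setup(u8 isc)	/* a driver-chosen ISC not above MAX_ISC */
{
	my_indicator = s390_register_adapter_interrupt(my_adapter_handler,
						       NULL, isc);
	return IS_ERR(my_indicator) ? PTR_ERR(my_indicator) : 0;
}

static void my_teardown(u8 isc)
{
	s390_unregister_adapter_interrupt(my_indicator, isc);
}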
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 297cdceb0ca4..db00b0591733 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -18,6 +18,7 @@
 #include <asm/chpid.h>
 #include <asm/sclp.h>
 
+#include "../s390mach.h"
 #include "cio.h"
 #include "css.h"
 #include "ioasm.h"
@@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
 	}
 	return opm;
 }
+EXPORT_SYMBOL_GPL(chp_get_sch_opm);
 
 /**
  * chp_is_registered - check if a channel-path is registered
@@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
 	CIO_TRACE_EVENT(2, dbf_text);
 
 	status = chp_get_status(chpid);
-	if (!on && !status) {
-		printk(KERN_ERR "cio: chpid %x.%02x is already offline\n",
-		       chpid.cssid, chpid.id);
-		return -EINVAL;
-	}
+	if (!on && !status)
+		return 0;
 
 	set_chp_logically_online(chpid, on);
 	chsc_chp_vary(chpid, on);
@@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
 {
 	struct channel_path *chp;
 	struct device *device;
-	unsigned int size;
 
 	device = container_of(kobj, struct device, kobj);
 	chp = to_channelpath(device);
 	if (!chp->cmg_chars)
 		return 0;
 
-	size = sizeof(struct cmg_chars);
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, chp->cmg_chars + off, count);
-	return count;
+	return memory_read_from_buffer(buf, count, &off,
+				       chp->cmg_chars, sizeof(struct cmg_chars));
 }
 
 static struct bin_attribute chp_measurement_chars_attr = {
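
The hunk above drops the open-coded offset/length checks in favor of memory_read_from_buffer(). As a reading aid, here is a hedged re-statement of that helper's contract (a sketch, not the kernel's implementation): clamp the read to the bytes available, copy, and advance the file offset.

ssize_t memory_read_from_buffer_sketch(void *to, size_t count, loff_t *ppos,
				       const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;		/* EOF for the bin_attribute read */
	if (count > available - pos)
		count = available - pos;
	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + count;
	return count;
}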
@@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid)
 		      chpid.id);
 
 	/* Obtain channel path description and fill it in. */
-	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
+	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
 	if (ret)
 		goto out_free;
 	if ((chp->desc.flags & 0x80) == 0) {
@@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid)
 		goto out_free;
 	}
 	/* Get channel-measurement characteristics. */
-	if (css_characteristics_avail && css_chsc_characteristics.scmc
-	    && css_chsc_characteristics.secm) {
+	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
 		ret = chsc_get_channel_measurement_chars(chp);
 		if (ret)
 			goto out_free;
@@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid)
 
 /**
  * chp_process_crw - process channel-path status change
- * @id: channel-path ID number
- * @status: non-zero if channel-path has become available, zero otherwise
+ * @crw0: channel report-word to handler
+ * @crw1: second channel-report word (always NULL)
+ * @overflow: crw overflow indication
  *
  * Handle channel-report-words indicating that the status of a channel-path
  * has changed.
  */
-void chp_process_crw(int id, int status)
+static void chp_process_crw(struct crw *crw0, struct crw *crw1,
+			    int overflow)
 {
 	struct chp_id chpid;
 
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	/*
+	 * Check for solicited machine checks. These are
+	 * created by reset channel path and need not be
+	 * handled here.
+	 */
+	if (crw0->slct) {
+		CIO_CRW_EVENT(2, "solicited machine check for "
+			      "channel path %02X\n", crw0->rsid);
+		return;
+	}
 	chp_id_init(&chpid);
-	chpid.id = id;
-	if (status) {
+	chpid.id = crw0->rsid;
+	switch (crw0->erc) {
+	case CRW_ERC_IPARM: /* Path has come. */
 		if (!chp_is_registered(chpid))
 			chp_new(chpid);
 		chsc_chp_online(chpid);
-	} else
+		break;
+	case CRW_ERC_PERRI: /* Path has gone. */
+	case CRW_ERC_PERRN:
 		chsc_chp_offline(chpid);
+		break;
+	default:
+		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
+			      crw0->erc);
+	}
 }
 
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (!(ssd->path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
+			continue;
+		if ((ssd->fla_valid_mask & mask) &&
+		    ((ssd->fla[i] & link->fla_mask) != link->fla))
+			continue;
+		return mask;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
+
 static inline int info_bit_num(struct chp_id id)
 {
 	return id.id + id.cssid * (__MAX_CHPID + 1);
@@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work)
 {
 	struct chp_id chpid;
 	enum cfg_task_t t;
+	int rc;
 
 	mutex_lock(&cfg_lock);
 	t = cfg_none;
@@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work)
 
 	switch (t) {
 	case cfg_configure:
-		sclp_chp_configure(chpid);
-		info_expire();
-		chsc_chp_online(chpid);
+		rc = sclp_chp_configure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_online(chpid);
+		}
 		break;
 	case cfg_deconfigure:
-		sclp_chp_deconfigure(chpid);
-		info_expire();
-		chsc_chp_offline(chpid);
+		rc = sclp_chp_deconfigure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_offline(chpid);
+		}
 		break;
 	case cfg_none:
 		/* Get updated information after last change. */
@@ -654,10 +704,16 @@ static int cfg_wait_idle(void)
 static int __init chp_init(void)
 {
 	struct chp_id chpid;
+	int ret;
 
+	ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw);
+	if (ret)
+		return ret;
 	chp_wq = create_singlethread_workqueue("cio_chp");
-	if (!chp_wq)
+	if (!chp_wq) {
+		s390_unregister_crw_handler(CRW_RSC_CPATH);
 		return -ENOMEM;
+	}
 	INIT_WORK(&cfg_work, cfg_func);
 	init_waitqueue_head(&cfg_wait_queue);
 	if (info_update())
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 65286563c592..26c3d2246176 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -12,12 +12,24 @@
 #include <linux/device.h>
 #include <asm/chpid.h>
 #include "chsc.h"
+#include "css.h"
 
 #define CHP_STATUS_STANDBY		0
 #define CHP_STATUS_CONFIGURED		1
 #define CHP_STATUS_RESERVED		2
 #define CHP_STATUS_NOT_RECOGNIZED	3
 
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct chp_link {
+	struct chp_id chpid;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static inline int chp_test_bit(u8 *bitmap, int num)
 {
 	int byte = num >> 3;
@@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
 void *chp_get_chp_desc(struct chp_id chpid);
-void chp_process_crw(int id, int available);
 void chp_remove_cmg_attr(struct channel_path *chp);
 int chp_add_cmg_attr(struct channel_path *chp);
 int chp_new(struct chp_id chpid);
 void chp_cfg_schedule(struct chp_id chpid, int configure);
 void chp_cfg_cancel_deconfigure(struct chp_id chpid);
 int chp_info_get_status(struct chp_id chpid);
-
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
 #endif /* S390_CHP_H */
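
struct chp_link and chp_ssd_get_mask() (declared above, defined in chp.c) let a subchannel driver translate a channel-path event into a mask of its own paths. A hedged sketch of a chp_event callback built on them follows; my_chp_event and its comments are illustrative, not part of the patch:

static int my_chp_event(struct subchannel *sch, void *data, int event)
{
	struct chp_link *link = data;
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);

	if (!mask)
		return 0;	/* event does not concern this subchannel */
	switch (event) {
	case CHP_OFFLINE:
	case CHP_VARY_OFF:
		/* stop using the paths in 'mask' */
		break;
	case CHP_ONLINE:
	case CHP_VARY_ON:
		/* resume using the paths in 'mask' and reverify */
		break;
	}
	return 0;
}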
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 5de86908b0d0..65264a38057d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,8 +2,7 @@
  * drivers/s390/cio/chsc.c
  *   S/390 common I/O routines -- channel subsystem call
  *
- *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- *			      IBM Corporation
+ *    Copyright IBM Corp. 1999,2008
  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Arnd Bergmann (arndb@de.ibm.com)
@@ -16,7 +15,9 @@
 
 #include <asm/cio.h>
 #include <asm/chpid.h>
+#include <asm/chsc.h>
 
+#include "../s390mach.h"
 #include "css.h"
 #include "cio.h"
 #include "cio_debug.h"
@@ -127,77 +128,12 @@ out_free:
 	return ret;
 }
 
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
-	int cc;
-
-	cc = stsch(sch->schid, &sch->schib);
-	if (cc)
-		return 0;
-	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
-		return 1;
-	return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
-	if (cio_clear(sch)) {
-		/* Recheck device in case clear failed. */
-		sch->lpm = 0;
-		if (device_trigger_verify(sch) != 0)
-			css_schedule_eval(sch->schid);
-		return;
-	}
-	/* Request retry of internal operation. */
-	device_set_intretry(sch);
-	/* Call handler. */
-	if (sch->driver && sch->driver->termination)
-		sch->driver->termination(sch);
-}
-
 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
 {
-	int j;
-	int mask;
-	struct chp_id *chpid = data;
-	struct schib schib;
-
-	for (j = 0; j < 8; j++) {
-		mask = 0x80 >> j;
-		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[j] == chpid->id))
-			break;
-	}
-	if (j >= 8)
-		return 0;
-
 	spin_lock_irq(sch->lock);
-
-	stsch(sch->schid, &schib);
-	if (!css_sch_is_valid(&schib))
-		goto out_unreg;
-	memcpy(&sch->schib, &schib, sizeof(struct schib));
-	/* Check for single path devices. */
-	if (sch->schib.pmcw.pim == 0x80)
-		goto out_unreg;
-
-	if (check_for_io_on_path(sch, mask)) {
-		if (device_is_online(sch))
-			device_kill_io(sch);
-		else {
-			terminate_internal_io(sch);
-			/* Re-start path verification. */
-			if (sch->driver && sch->driver->verify)
-				sch->driver->verify(sch);
-		}
-	} else {
-		/* trigger path verification. */
-		if (sch->driver && sch->driver->verify)
-			sch->driver->verify(sch);
-		else if (sch->lpm == mask)
+	if (sch->driver && sch->driver->chp_event)
+		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
 			goto out_unreg;
-	}
-
 	spin_unlock_irq(sch->lock);
 	return 0;
 
@@ -211,15 +147,18 @@ out_unreg:
 void chsc_chp_offline(struct chp_id chpid)
 {
 	char dbf_txt[15];
+	struct chp_link link;
 
 	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (chp_get_status(chpid) <= 0)
 		return;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	/* Wait until previous actions have settled. */
 	css_wait_for_slow_path();
-	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
+	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
 }
 
 static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
@@ -242,67 +181,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
 	return 0;
 }
 
-struct res_acc_data {
-	struct chp_id chpid;
-	u32 fla_mask;
-	u16 fla;
-};
-
-static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
-			      struct res_acc_data *data)
-{
-	int i;
-	int mask;
-
-	for (i = 0; i < 8; i++) {
-		mask = 0x80 >> i;
-		if (!(ssd->path_mask & mask))
-			continue;
-		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
-			continue;
-		if ((ssd->fla_valid_mask & mask) &&
-		    ((ssd->fla[i] & data->fla_mask) != data->fla))
-			continue;
-		return mask;
-	}
-	return 0;
-}
-
 static int __s390_process_res_acc(struct subchannel *sch, void *data)
 {
-	int chp_mask, old_lpm;
-	struct res_acc_data *res_data = data;
-
 	spin_lock_irq(sch->lock);
-	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
-	if (chp_mask == 0)
-		goto out;
-	if (stsch(sch->schid, &sch->schib))
-		goto out;
-	old_lpm = sch->lpm;
-	sch->lpm = ((sch->schib.pmcw.pim &
-		     sch->schib.pmcw.pam &
-		     sch->schib.pmcw.pom)
-		    | chp_mask) & sch->opm;
-	if (!old_lpm && sch->lpm)
-		device_trigger_reprobe(sch);
-	else if (sch->driver && sch->driver->verify)
-		sch->driver->verify(sch);
-out:
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, data, CHP_ONLINE);
 	spin_unlock_irq(sch->lock);
 
 	return 0;
 }
 
-static void s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc(struct chp_link *link)
 {
 	char dbf_txt[15];
 
-	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
-		res_data->chpid.id);
+	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
+		link->chpid.id);
 	CIO_TRACE_EVENT( 2, dbf_txt);
-	if (res_data->fla != 0) {
-		sprintf(dbf_txt, "fla%x", res_data->fla);
+	if (link->fla != 0) {
+		sprintf(dbf_txt, "fla%x", link->fla);
 		CIO_TRACE_EVENT( 2, dbf_txt);
 	}
 	/* Wait until previous actions have settled. */
@@ -315,7 +212,7 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
 	 * will we have to do.
 	 */
 	for_each_subchannel_staged(__s390_process_res_acc,
-				   s390_process_res_acc_new_sch, res_data);
+				   s390_process_res_acc_new_sch, link);
 }
 
 static int
@@ -388,7 +285,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 
 static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
-	struct res_acc_data res_data;
+	struct chp_link link;
 	struct chp_id chpid;
 	int status;
 
@@ -404,18 +301,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 		chp_new(chpid);
 	else if (!status)
 		return;
-	memset(&res_data, 0, sizeof(struct res_acc_data));
-	res_data.chpid = chpid;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	if ((sei_area->vf & 0xc0) != 0) {
-		res_data.fla = sei_area->fla;
+		link.fla = sei_area->fla;
 		if ((sei_area->vf & 0xc0) == 0xc0)
 			/* full link address */
-			res_data.fla_mask = 0xffff;
+			link.fla_mask = 0xffff;
 		else
 			/* link address */
-			res_data.fla_mask = 0xff00;
+			link.fla_mask = 0xff00;
 	}
-	s390_process_res_acc(&res_data);
+	s390_process_res_acc(&link);
 }
 
 struct chp_config_data {
@@ -480,17 +377,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
 	}
 }
 
-void chsc_process_crw(void)
+static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
 	struct chsc_sei_area *sei_area;
 
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
 	if (!sei_page)
 		return;
 	/* Access to sei_page is serialized through machine check handler
 	 * thread, so no need for locking. */
 	sei_area = sei_page;
 
-	CIO_TRACE_EVENT( 2, "prcss");
+	CIO_TRACE_EVENT(2, "prcss");
 	do {
 		memset(sei_area, 0, sizeof(*sei_area));
 		sei_area->request.length = 0x0010;
@@ -509,114 +414,36 @@ void chsc_process_crw(void)
 	} while (sei_area->flags & 0x80);
 }
 
-static int __chp_add_new_sch(struct subchannel_id schid, void *data)
-{
-	struct schib schib;
-
-	if (stsch_err(schid, &schib))
-		/* We're through */
-		return -ENXIO;
-
-	/* Put it on the slow path. */
-	css_schedule_eval(schid);
-	return 0;
-}
-
-
-static int __chp_add(struct subchannel *sch, void *data)
-{
-	int i, mask;
-	struct chp_id *chpid = data;
-
-	spin_lock_irq(sch->lock);
-	for (i=0; i<8; i++) {
-		mask = 0x80 >> i;
-		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[i] == chpid->id))
-			break;
-	}
-	if (i==8) {
-		spin_unlock_irq(sch->lock);
-		return 0;
-	}
-	if (stsch(sch->schid, &sch->schib)) {
-		spin_unlock_irq(sch->lock);
-		css_schedule_eval(sch->schid);
-		return 0;
-	}
-	sch->lpm = ((sch->schib.pmcw.pim &
-		     sch->schib.pmcw.pam &
-		     sch->schib.pmcw.pom)
-		    | mask) & sch->opm;
-
-	if (sch->driver && sch->driver->verify)
-		sch->driver->verify(sch);
-
-	spin_unlock_irq(sch->lock);
-
-	return 0;
-}
-
 void chsc_chp_online(struct chp_id chpid)
 {
 	char dbf_txt[15];
+	struct chp_link link;
 
 	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (chp_get_status(chpid) != 0) {
+		memset(&link, 0, sizeof(struct chp_link));
+		link.chpid = chpid;
 		/* Wait until previous actions have settled. */
 		css_wait_for_slow_path();
-		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
-					   &chpid);
+		for_each_subchannel_staged(__s390_process_res_acc, NULL,
+					   &link);
 	}
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 					 struct chp_id chpid, int on)
 {
-	int chp, old_lpm;
-	int mask;
 	unsigned long flags;
+	struct chp_link link;
 
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	spin_lock_irqsave(sch->lock, flags);
-	old_lpm = sch->lpm;
-	for (chp = 0; chp < 8; chp++) {
-		mask = 0x80 >> chp;
-		if (!(sch->ssd_info.path_mask & mask))
-			continue;
-		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
-			continue;
-
-		if (on) {
-			sch->opm |= mask;
-			sch->lpm |= mask;
-			if (!old_lpm)
-				device_trigger_reprobe(sch);
-			else if (sch->driver && sch->driver->verify)
-				sch->driver->verify(sch);
-			break;
-		}
-		sch->opm &= ~mask;
-		sch->lpm &= ~mask;
-		if (check_for_io_on_path(sch, mask)) {
-			if (device_is_online(sch))
-				/* Path verification is done after killing. */
-				device_kill_io(sch);
-			else {
-				/* Kill and retry internal I/O. */
-				terminate_internal_io(sch);
-				/* Re-start path verification. */
-				if (sch->driver && sch->driver->verify)
-					sch->driver->verify(sch);
-			}
-		} else if (!sch->lpm) {
-			if (device_trigger_verify(sch) != 0)
-				css_schedule_eval(sch->schid);
-		} else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(sch);
-		break;
-	}
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, &link,
+				       on ? CHP_VARY_ON : CHP_VARY_OFF);
 	spin_unlock_irqrestore(sch->lock, flags);
 }
 
@@ -656,6 +483,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
  */
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
+	struct chp_link link;
+
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	/* Wait until previous actions have settled. */
 	css_wait_for_slow_path();
 	/*
@@ -664,10 +495,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
 
 	if (on)
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
-					   __s390_vary_chpid_on, &chpid);
+					   __s390_vary_chpid_on, &link);
 	else
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
-					   NULL, &chpid);
+					   NULL, &link);
 
 	return 0;
 }
@@ -797,23 +628,33 @@ chsc_secm(struct channel_subsystem *css, int enable)
 	return ret;
 }
 
-int chsc_determine_channel_path_description(struct chp_id chpid,
-					    struct channel_path_desc *desc)
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m,
+				     struct chsc_response_struct *resp)
 {
 	int ccode, ret;
 
 	struct {
 		struct chsc_header request;
-		u32 : 24;
+		u32 : 2;
+		u32 m : 1;
+		u32 c : 1;
+		u32 fmt : 4;
+		u32 cssid : 8;
+		u32 : 4;
+		u32 rfmt : 4;
 		u32 first_chpid : 8;
 		u32 : 24;
 		u32 last_chpid : 8;
 		u32 zeroes1;
 		struct chsc_header response;
-		u32 zeroes2;
-		struct channel_path_desc desc;
+		u8 data[PAGE_SIZE - 20];
 	} __attribute__ ((packed)) *scpd_area;
 
+	if ((rfmt == 1) && !css_general_characteristics.fcs)
+		return -EINVAL;
+	if ((rfmt == 2) && !css_general_characteristics.cib)
+		return -EINVAL;
 	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!scpd_area)
 		return -ENOMEM;
@@ -821,8 +662,13 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
 	scpd_area->request.length = 0x0010;
 	scpd_area->request.code = 0x0002;
 
+	scpd_area->cssid = chpid.cssid;
 	scpd_area->first_chpid = chpid.id;
 	scpd_area->last_chpid = chpid.id;
+	scpd_area->m = m;
+	scpd_area->c = c;
+	scpd_area->fmt = fmt;
+	scpd_area->rfmt = rfmt;
 
 	ccode = chsc(scpd_area);
 	if (ccode > 0) {
@@ -833,8 +679,7 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
 	ret = chsc_error_from_response(scpd_area->response.code);
 	if (ret == 0)
 		/* Success. */
-		memcpy(desc, &scpd_area->desc,
-		       sizeof(struct channel_path_desc));
+		memcpy(resp, &scpd_area->response, scpd_area->response.length);
 	else
 		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
 			      scpd_area->response.code);
@@ -842,6 +687,25 @@ out:
 	free_page((unsigned long)scpd_area);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
+
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc *desc)
+{
+	struct chsc_response_struct *chsc_resp;
+	int ret;
+
+	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
+	if (!chsc_resp)
+		return -ENOMEM;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+	if (ret)
+		goto out_free;
+	memcpy(desc, &chsc_resp->data, chsc_resp->length);
+out_free:
+	kfree(chsc_resp);
+	return ret;
+}
 
 static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
@@ -937,15 +801,23 @@ out:
 
 int __init chsc_alloc_sei_area(void)
 {
+	int ret;
+
 	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!sei_page)
+	if (!sei_page) {
 		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
 			      "chsc machine checks!\n");
-	return (sei_page ? 0 : -ENOMEM);
+		return -ENOMEM;
+	}
+	ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
+	if (ret)
+		kfree(sei_page);
+	return ret;
 }
 
 void __init chsc_free_sei_area(void)
 {
+	s390_unregister_crw_handler(CRW_RSC_CSS);
 	kfree(sei_page);
 }
 
951 823
@@ -1043,3 +915,52 @@ exit:
1043 915
1044EXPORT_SYMBOL_GPL(css_general_characteristics); 916EXPORT_SYMBOL_GPL(css_general_characteristics);
1045EXPORT_SYMBOL_GPL(css_chsc_characteristics); 917EXPORT_SYMBOL_GPL(css_chsc_characteristics);
918
919int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
920{
921 struct {
922 struct chsc_header request;
923 unsigned int rsvd0;
924 unsigned int op : 8;
925 unsigned int rsvd1 : 8;
926 unsigned int ctrl : 16;
927 unsigned int rsvd2[5];
928 struct chsc_header response;
929 unsigned int rsvd3[7];
930 } __attribute__ ((packed)) *rr;
931 int rc;
932
933 memset(page, 0, PAGE_SIZE);
934 rr = page;
935 rr->request.length = 0x0020;
936 rr->request.code = 0x0033;
937 rr->op = op;
938 rr->ctrl = ctrl;
939 rc = chsc(rr);
940 if (rc)
941 return -EIO;
942 rc = (rr->response.code == 0x0001) ? 0 : -EIO;
943 return rc;
944}
945
946int chsc_sstpi(void *page, void *result, size_t size)
947{
948 struct {
949 struct chsc_header request;
950 unsigned int rsvd0[3];
951 struct chsc_header response;
952 char data[size];
953 } __attribute__ ((packed)) *rr;
954 int rc;
955
956 memset(page, 0, PAGE_SIZE);
957 rr = page;
958 rr->request.length = 0x0010;
959 rr->request.code = 0x0038;
960 rc = chsc(rr);
961 if (rc)
962 return -EIO;
963 memcpy(result, &rr->data, size);
964 return (rr->response.code == 0x0001) ? 0 : -EIO;
965}
966
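
chsc_sstpc() and chsc_sstpi() above both expect a caller-provided, page-sized buffer, which they clear themselves before building the request. A hedged caller sketch; the function name and the op/ctrl values are placeholders, and the only hard requirement visible above is a page-sized, DMA-capable buffer:

static int my_stp_request(unsigned int op, u16 ctrl)
{
	void *page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	int rc;

	if (!page)
		return -ENOMEM;
	rc = chsc_sstpc(page, op, ctrl);	/* 0 on success, -EIO otherwise */
	free_page((unsigned long)page);
	return rc;
}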
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index d1f5db1e69b9..fb6c4d6c45b4 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -4,7 +4,8 @@
 #include <linux/types.h>
 #include <linux/device.h>
 #include <asm/chpid.h>
-#include "schid.h"
+#include <asm/chsc.h>
+#include <asm/schid.h>
 
 #define CHSC_SDA_OC_MSS   0x2
 
@@ -36,14 +37,15 @@ struct channel_path_desc {
 
 struct channel_path;
 
-extern void chsc_process_crw(void);
-
 struct css_general_char {
-	u64 : 41;
+	u64 : 12;
+	u32 dynio : 1;	 /* bit 12 */
+	u32 : 28;
 	u32 aif : 1;     /* bit 41 */
 	u32 : 3;
 	u32 mcss : 1;    /* bit 45 */
-	u32 : 2;
+	u32 fcs : 1;	 /* bit 46 */
+	u32 : 1;
 	u32 ext_mb : 1;  /* bit 48 */
 	u32 : 7;
 	u32 aif_tdd : 1; /* bit 56 */
@@ -51,7 +53,11 @@ struct css_general_char {
 	u32 qebsm : 1;   /* bit 58 */
 	u32 : 8;
 	u32 aif_osa : 1; /* bit 67 */
-	u32 : 28;
+	u32 : 14;
+	u32 cib : 1;	 /* bit 82 */
+	u32 : 5;
+	u32 fcx : 1;	 /* bit 88 */
+	u32 : 7;
 }__attribute__((packed));
 
 struct css_chsc_char {
@@ -78,7 +84,6 @@ struct chsc_ssd_info {
 extern int chsc_get_ssd_info(struct subchannel_id schid,
 			     struct chsc_ssd_info *ssd);
 extern int chsc_determine_css_characteristics(void);
-extern int css_characteristics_avail;
 extern int chsc_alloc_sei_area(void);
 extern void chsc_free_sei_area(void);
 
@@ -87,8 +92,11 @@ struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
 
 int chsc_chp_vary(struct chp_id chpid, int on);
-int chsc_determine_channel_path_description(struct chp_id chpid,
-					    struct channel_path_desc *desc);
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m,
+				     struct chsc_response_struct *resp);
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc *desc);
 void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
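
The widened css_general_char bitfield above exposes new facility bits (dynio, fcs, cib, fcx). Callers gate on them before issuing the corresponding chsc requests, as chsc_determine_channel_path_desc() does for rfmt. A hedged sketch of that pattern; my_check_features is illustrative, not part of the patch:

static int my_check_features(void)
{
	if (!css_general_characteristics.dynio)
		return -EOPNOTSUPP;	/* no dynamic-I/O chsc commands */
	if (!css_general_characteristics.fcx)
		return -EOPNOTSUPP;	/* no FCX transport facilities */
	return 0;
}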
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 000000000000..91ca87aa9f97
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,820 @@
+/*
+ * Driver for s390 chsc subchannels
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include <asm/cio.h>
+#include <asm/chsc.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc_sch.h"
+#include "ioasm.h"
+
+static debug_info_t *chsc_debug_msg_id;
+static debug_info_t *chsc_debug_log_id;
+
+#define CHSC_MSG(imp, args...) do {					\
+		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
+	} while (0)
+
+#define CHSC_LOG(imp, txt) do {					\
+		debug_text_event(chsc_debug_log_id, imp , txt);	\
+	} while (0)
+
+static void CHSC_LOG_HEX(int level, void *data, int length)
+{
+	while (length > 0) {
+		debug_event(chsc_debug_log_id, level, data, length);
+		length -= chsc_debug_log_id->buf_size;
+		data += chsc_debug_log_id->buf_size;
+	}
+}
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("driver for s390 chsc subchannels");
+MODULE_LICENSE("GPL");
+
+static void chsc_subchannel_irq(struct subchannel *sch)
+{
+	struct chsc_private *private = sch->private;
+	struct chsc_request *request = private->request;
+	struct irb *irb = (struct irb *)__LC_IRB;
+
+	CHSC_LOG(4, "irb");
+	CHSC_LOG_HEX(4, irb, sizeof(*irb));
+	/* Copy irb to provided request and set done. */
+	if (!request) {
+		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
+			 sch->schid.ssid, sch->schid.sch_no);
+		return;
+	}
+	private->request = NULL;
+	memcpy(&request->irb, irb, sizeof(*irb));
+	stsch(sch->schid, &sch->schib);
+	complete(&request->completion);
+	put_device(&sch->dev);
+}
+
+static int chsc_subchannel_probe(struct subchannel *sch)
+{
+	struct chsc_private *private;
+	int ret;
+
+	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
+		 sch->schid.ssid, sch->schid.sch_no);
+	sch->isc = CHSC_SCH_ISC;
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+	if (ret) {
+		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
+			 sch->schid.ssid, sch->schid.sch_no, ret);
+		kfree(private);
+	} else {
+		sch->private = private;
+		if (sch->dev.uevent_suppress) {
+			sch->dev.uevent_suppress = 0;
+			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		}
+	}
+	return ret;
+}
+
+static int chsc_subchannel_remove(struct subchannel *sch)
+{
+	struct chsc_private *private;
+
+	cio_disable_subchannel(sch);
+	private = sch->private;
+	sch->private = NULL;
+	if (private->request) {
+		complete(&private->request->completion);
+		put_device(&sch->dev);
+	}
+	kfree(private);
+	return 0;
+}
+
+static void chsc_subchannel_shutdown(struct subchannel *sch)
+{
+	cio_disable_subchannel(sch);
+}
+
+static struct css_device_id chsc_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
+
+static struct css_driver chsc_subchannel_driver = {
+	.owner = THIS_MODULE,
+	.subchannel_type = chsc_subchannel_ids,
+	.irq = chsc_subchannel_irq,
+	.probe = chsc_subchannel_probe,
+	.remove = chsc_subchannel_remove,
+	.shutdown = chsc_subchannel_shutdown,
+	.name = "chsc_subchannel",
+};
+
+static int __init chsc_init_dbfs(void)
+{
+	chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
+					   16 * sizeof(long));
+	if (!chsc_debug_msg_id)
+		goto out;
+	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
+	debug_set_level(chsc_debug_msg_id, 2);
+	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
+	if (!chsc_debug_log_id)
+		goto out;
+	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
+	debug_set_level(chsc_debug_log_id, 2);
+	return 0;
+out:
+	if (chsc_debug_msg_id)
+		debug_unregister(chsc_debug_msg_id);
+	return -ENOMEM;
+}
+
+static void chsc_remove_dbfs(void)
+{
+	debug_unregister(chsc_debug_log_id);
+	debug_unregister(chsc_debug_msg_id);
+}
+
+static int __init chsc_init_sch_driver(void)
+{
+	return css_driver_register(&chsc_subchannel_driver);
+}
+
+static void chsc_cleanup_sch_driver(void)
+{
+	css_driver_unregister(&chsc_subchannel_driver);
+}
+
+static DEFINE_SPINLOCK(chsc_lock);
+
+static int chsc_subchannel_match_next_free(struct device *dev, void *data)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
+}
+
+static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
+{
+	struct device *dev;
+
+	dev = driver_find_device(&chsc_subchannel_driver.drv,
+				 sch ? &sch->dev : NULL, NULL,
+				 chsc_subchannel_match_next_free);
+	return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * chsc_async() - try to start a chsc request asynchronously
+ * @chsc_area: request to be started
+ * @request: request structure to associate
+ *
+ * Tries to start a chsc request on one of the existing chsc subchannels.
+ * Returns:
+ *  %0 if the request was performed synchronously
+ *  %-EINPROGRESS if the request was successfully started
+ *  %-EBUSY if all chsc subchannels are busy
+ *  %-ENODEV if no chsc subchannels are available
+ * Context:
+ *  interrupts disabled, chsc_lock held
+ */
+static int chsc_async(struct chsc_async_area *chsc_area,
+		      struct chsc_request *request)
+{
+	int cc;
+	struct chsc_private *private;
+	struct subchannel *sch = NULL;
+	int ret = -ENODEV;
+	char dbf[10];
+
+	chsc_area->header.key = PAGE_DEFAULT_KEY;
+	while ((sch = chsc_get_next_subchannel(sch))) {
+		spin_lock(sch->lock);
+		private = sch->private;
+		if (private->request) {
+			spin_unlock(sch->lock);
+			ret = -EBUSY;
+			continue;
+		}
+		chsc_area->header.sid = sch->schid;
+		CHSC_LOG(2, "schid");
+		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
+		cc = chsc(chsc_area);
+		sprintf(dbf, "cc:%d", cc);
+		CHSC_LOG(2, dbf);
+		switch (cc) {
+		case 0:
+			ret = 0;
+			break;
+		case 1:
+			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
+			ret = -EINPROGRESS;
+			private->request = request;
+			break;
+		case 2:
+			ret = -EBUSY;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		spin_unlock(sch->lock);
+		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
+			 sch->schid.ssid, sch->schid.sch_no, cc);
+		if (ret == -EINPROGRESS)
+			return -EINPROGRESS;
+		put_device(&sch->dev);
+		if (ret == 0)
+			return 0;
+	}
+	return ret;
+}
+
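The kernel-doc contract of chsc_async() implies the following calling pattern, shown here as a hedged editorial sketch (not in the original patch); chsc_ioctl_start() further below is the actual in-tree caller.

static int my_chsc_sync_call(struct chsc_async_area *chsc_area,
			     struct chsc_request *request)
{
	int ret;

	spin_lock_irq(&chsc_lock);	/* contract: lock held, irqs off */
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {	/* started; wait for the interrupt */
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	return ret;			/* 0, or a negative errno */
}
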
+static void chsc_log_command(struct chsc_async_area *chsc_area)
+{
+	char dbf[10];
+
+	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
+	CHSC_LOG(0, dbf);
+	CHSC_LOG_HEX(0, chsc_area, 32);
+}
+
+static int chsc_examine_irb(struct chsc_request *request)
+{
+	int backed_up;
+
+	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
+		return -EIO;
+	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
+	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
+	if (scsw_cstat(&request->irb.scsw) == 0)
+		return 0;
+	if (!backed_up)
+		return 0;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
+		return -EIO;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
+		return -EPERM;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
+		return -EAGAIN;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
+		return -EAGAIN;
+	return -EIO;
+}
+
+static int chsc_ioctl_start(void __user *user_area)
+{
+	struct chsc_request *request;
+	struct chsc_async_area *chsc_area;
+	int ret;
+	char dbf[10];
+
+	if (!css_general_characteristics.dynio)
+		/* It makes no sense to try. */
+		return -EOPNOTSUPP;
+	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+	if (!chsc_area)
+		return -ENOMEM;
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	init_completion(&request->completion);
+	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	chsc_log_command(chsc_area);
+	spin_lock_irq(&chsc_lock);
+	ret = chsc_async(chsc_area, request);
+	spin_unlock_irq(&chsc_lock);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&request->completion);
+		ret = chsc_examine_irb(request);
+	}
+	/* copy area back to user */
+	if (!ret)
+		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+			ret = -EFAULT;
+out_free:
+	sprintf(dbf, "ret:%d", ret);
+	CHSC_LOG(0, dbf);
+	kfree(request);
+	free_page((unsigned long)chsc_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_channel_path(void __user *user_cd)
+{
+	struct chsc_chp_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_chpid : 8;
+		u32 : 24;
+		u32 last_chpid : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scpcd_area;
+
+	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpcd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scpcd_area->request.length = 0x0010;
+	scpcd_area->request.code = 0x0028;
+	scpcd_area->m = cd->m;
+	scpcd_area->fmt1 = cd->fmt;
+	scpcd_area->cssid = cd->chpid.cssid;
+	scpcd_area->first_chpid = cd->chpid.id;
+	scpcd_area->last_chpid = cd->chpid.id;
+
+	ccode = chsc(scpcd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scpcd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scpcd: response code=%x\n",
+			 scpcd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scpcd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_cu(void __user *user_cd)
+{
+	struct chsc_cu_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_cun : 8;
+		u32 : 24;
+		u32 last_cun : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scucd_area;
+
+	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scucd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scucd_area->request.length = 0x0010;
+	scucd_area->request.code = 0x0028;
+	scucd_area->m = cd->m;
+	scucd_area->fmt1 = cd->fmt;
+	scucd_area->cssid = cd->cssid;
+	scucd_area->first_cun = cd->cun;
+	scucd_area->last_cun = cd->cun;
+
+	ccode = chsc(scucd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scucd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scucd: response code=%x\n",
+			 scucd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scucd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_sch_cu(void __user *user_cud)
+{
+	struct chsc_sch_cud *cud;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 5;
+		u32 fmt1 : 4;
+		u32 : 2;
+		u32 ssid : 2;
+		u32 first_sch : 16;
+		u32 : 8;
+		u32 cssid : 8;
+		u32 last_sch : 16;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sscud_area;
+
+	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sscud_area)
+		return -ENOMEM;
+	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
+	if (!cud) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sscud_area->request.length = 0x0010;
+	sscud_area->request.code = 0x0006;
+	sscud_area->m = cud->schid.m;
+	sscud_area->fmt1 = cud->fmt;
+	sscud_area->ssid = cud->schid.ssid;
+	sscud_area->first_sch = cud->schid.sch_no;
+	sscud_area->cssid = cud->schid.cssid;
+	sscud_area->last_sch = cud->schid.sch_no;
+
+	ccode = chsc(sscud_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sscud_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sscud: response code=%x\n",
+			 sscud_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
+	if (copy_to_user(user_cud, cud, sizeof(*cud)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cud);
+	free_page((unsigned long)sscud_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_info(void __user *user_ci)
+{
+	struct chsc_conf_info *ci;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 6;
+		u32 ssid : 2;
+		u32 : 8;
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sci_area;
+
+	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sci_area)
+		return -ENOMEM;
+	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+	if (!ci) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sci_area->request.length = 0x0010;
+	sci_area->request.code = 0x0012;
+	sci_area->m = ci->id.m;
+	sci_area->fmt1 = ci->fmt;
+	sci_area->cssid = ci->id.cssid;
+	sci_area->ssid = ci->id.ssid;
+
+	ccode = chsc(sci_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sci_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sci: response code=%x\n",
+			 sci_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
+	if (copy_to_user(user_ci, ci, sizeof(*ci)))
565 ret = -EFAULT;
566 else
567 ret = 0;
568out_free:
569 kfree(ci);
570 free_page((unsigned long)sci_area);
571 return ret;
572}
573
574static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
575{
576 struct chsc_comp_list *ccl;
577 int ret, ccode;
578 struct {
579 struct chsc_header request;
580 u32 ctype : 8;
581 u32 : 4;
582 u32 fmt : 4;
583 u32 : 16;
584 u64 : 64;
585 u32 list_parm[2];
586 u64 : 64;
587 struct chsc_header response;
588 u8 data[PAGE_SIZE - 36];
589 } __attribute__ ((packed)) *sccl_area;
590 struct {
591 u32 m : 1;
592 u32 : 31;
593 u32 cssid : 8;
594 u32 : 16;
595 u32 chpid : 8;
596 } __attribute__ ((packed)) *chpid_parm;
597 struct {
598 u32 f_cssid : 8;
599 u32 l_cssid : 8;
600 u32 : 16;
601 u32 res;
602 } __attribute__ ((packed)) *cssids_parm;
603
604 sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
605 if (!sccl_area)
606 return -ENOMEM;
607 ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
608 if (!ccl) {
609 ret = -ENOMEM;
610 goto out_free;
611 }
612 if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
613 ret = -EFAULT;
614 goto out_free;
615 }
616 sccl_area->request.length = 0x0020;
617 sccl_area->request.code = 0x0030;
618 sccl_area->fmt = ccl->req.fmt;
619 sccl_area->ctype = ccl->req.ctype;
620 switch (sccl_area->ctype) {
621 case CCL_CU_ON_CHP:
622 case CCL_IOP_CHP:
623 chpid_parm = (void *)&sccl_area->list_parm;
624 chpid_parm->m = ccl->req.chpid.m;
625 chpid_parm->cssid = ccl->req.chpid.chp.cssid;
626 chpid_parm->chpid = ccl->req.chpid.chp.id;
627 break;
628 case CCL_CSS_IMG:
629 case CCL_CSS_IMG_CONF_CHAR:
630 cssids_parm = (void *)&sccl_area->list_parm;
631 cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
632 cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
633 break;
634 }
635 ccode = chsc(sccl_area);
636 if (ccode != 0) {
637 ret = -EIO;
638 goto out_free;
639 }
640 if (sccl_area->response.code != 0x0001) {
641 ret = -EIO;
642 CHSC_MSG(0, "sccl: response code=%x\n",
643 sccl_area->response.code);
644 goto out_free;
645 }
646 memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
647 if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
648 ret = -EFAULT;
649 else
650 ret = 0;
651out_free:
652 kfree(ccl);
653 free_page((unsigned long)sccl_area);
654 return ret;
655}
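
The 8-byte list_parm field is reinterpreted according to ctype: the CHP-based list types see a chpid descriptor, the CSS-image types a cssid range (request.length is 0x0020 here, covering the 32-byte command part; 32 plus the 4-byte response header gives the data[PAGE_SIZE - 36] sizing). A standalone sketch of the same overlay idiom, with invented stand-in types and values:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct list_parm { uint32_t raw[2]; };	/* the 8-byte field being overlaid */

struct chpid_parm {			/* CCL_CU_ON_CHP / CCL_IOP_CHP view */
	uint32_t m : 1;
	uint32_t : 31;
	uint32_t cssid : 8;
	uint32_t : 16;
	uint32_t chpid : 8;
} __attribute__((packed));

struct cssids_parm {			/* CCL_CSS_IMG* view */
	uint32_t f_cssid : 8;
	uint32_t l_cssid : 8;
	uint32_t : 16;
	uint32_t res;
} __attribute__((packed));

int main(void)
{
	struct list_parm lp;
	struct chpid_parm *cp = (void *)&lp;

	memset(&lp, 0, sizeof(lp));
	cp->m = 1;
	cp->chpid = 0x50;	/* invented channel-path id */
	assert(sizeof(struct chpid_parm) == sizeof(lp));
	assert(sizeof(struct cssids_parm) == sizeof(lp));
	return 0;
}
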
656
657static int chsc_ioctl_chpd(void __user *user_chpd)
658{
659 struct chsc_cpd_info *chpd;
660 int ret;
661
662 chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
663 if (!chpd)
664 return -ENOMEM;
665 if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
666 ret = -EFAULT;
667 goto out_free;
668 }
669 ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
670 chpd->rfmt, chpd->c, chpd->m,
671 &chpd->chpdb);
672 if (ret)
673 goto out_free;
674 if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
675 ret = -EFAULT;
676out_free:
677 kfree(chpd);
678 return ret;
679}
680
681static int chsc_ioctl_dcal(void __user *user_dcal)
682{
683 struct chsc_dcal *dcal;
684 int ret, ccode;
685 struct {
686 struct chsc_header request;
687 u32 atype : 8;
688 u32 : 4;
689 u32 fmt : 4;
690 u32 : 16;
691 u32 res0[2];
692 u32 list_parm[2];
693 u32 res1[2];
694 struct chsc_header response;
695 u8 data[PAGE_SIZE - 36];
696 } __attribute__ ((packed)) *sdcal_area;
697
698 sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
699 if (!sdcal_area)
700 return -ENOMEM;
701 dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
702 if (!dcal) {
703 ret = -ENOMEM;
704 goto out_free;
705 }
706 if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
707 ret = -EFAULT;
708 goto out_free;
709 }
710 sdcal_area->request.length = 0x0020;
711 sdcal_area->request.code = 0x0034;
712 sdcal_area->atype = dcal->req.atype;
713 sdcal_area->fmt = dcal->req.fmt;
714 memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
715 sizeof(sdcal_area->list_parm));
716
717 ccode = chsc(sdcal_area);
718 if (ccode != 0) {
719 ret = -EIO;
720 goto out_free;
721 }
722 if (sdcal_area->response.code != 0x0001) {
723 ret = -EIO;
724 CHSC_MSG(0, "sdcal: response code=%x\n",
725 sdcal_area->response.code);
726 goto out_free;
727 }
728 memcpy(&dcal->sdcal, &sdcal_area->response,
729 sdcal_area->response.length);
730 if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
731 ret = -EFAULT;
732 else
733 ret = 0;
734out_free:
735 kfree(dcal);
736 free_page((unsigned long)sdcal_area);
737 return ret;
738}
739
740static long chsc_ioctl(struct file *filp, unsigned int cmd,
741 unsigned long arg)
742{
743 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
744 switch (cmd) {
745 case CHSC_START:
746 return chsc_ioctl_start((void __user *)arg);
747 case CHSC_INFO_CHANNEL_PATH:
748 return chsc_ioctl_info_channel_path((void __user *)arg);
749 case CHSC_INFO_CU:
750 return chsc_ioctl_info_cu((void __user *)arg);
751 case CHSC_INFO_SCH_CU:
752 return chsc_ioctl_info_sch_cu((void __user *)arg);
753 case CHSC_INFO_CI:
754 return chsc_ioctl_conf_info((void __user *)arg);
755 case CHSC_INFO_CCL:
756 return chsc_ioctl_conf_comp_list((void __user *)arg);
757 case CHSC_INFO_CPD:
758 return chsc_ioctl_chpd((void __user *)arg);
759 case CHSC_INFO_DCAL:
760 return chsc_ioctl_dcal((void __user *)arg);
761 default: /* unknown ioctl number */
762 return -ENOIOCTLCMD;
763 }
764}
765
766static const struct file_operations chsc_fops = {
767 .owner = THIS_MODULE,
768 .unlocked_ioctl = chsc_ioctl,
769 .compat_ioctl = chsc_ioctl,
770};
771
772static struct miscdevice chsc_misc_device = {
773 .minor = MISC_DYNAMIC_MINOR,
774 .name = "chsc",
775 .fops = &chsc_fops,
776};
777
778static int __init chsc_misc_init(void)
779{
780 return misc_register(&chsc_misc_device);
781}
782
783static void chsc_misc_cleanup(void)
784{
785 misc_deregister(&chsc_misc_device);
786}
787
788static int __init chsc_sch_init(void)
789{
790 int ret;
791
792 ret = chsc_init_dbfs();
793 if (ret)
794 return ret;
795 isc_register(CHSC_SCH_ISC);
796 ret = chsc_init_sch_driver();
797 if (ret)
798 goto out_dbf;
799 ret = chsc_misc_init();
800 if (ret)
801 goto out_driver;
802 return ret;
803out_driver:
804 chsc_cleanup_sch_driver();
805out_dbf:
806 isc_unregister(CHSC_SCH_ISC);
807 chsc_remove_dbfs();
808 return ret;
809}
810
811static void __exit chsc_sch_exit(void)
812{
813 chsc_misc_cleanup();
814 chsc_cleanup_sch_driver();
815 isc_unregister(CHSC_SCH_ISC);
816 chsc_remove_dbfs();
817}
818
819module_init(chsc_sch_init);
820module_exit(chsc_sch_exit);
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 000000000000..589ebfad6aad
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,13 @@
1#ifndef _CHSC_SCH_H
2#define _CHSC_SCH_H
3
4struct chsc_request {
5 struct completion completion;
6 struct irb irb;
7};
8
9struct chsc_private {
10 struct chsc_request *request;
11};
12
13#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b32d7eb3d81a..33bff8fec7d1 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -2,7 +2,7 @@
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2006 5 * Copyright IBM Corp. 1999,2008
6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -24,7 +24,9 @@
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h> 25#include <asm/chpid.h>
26#include <asm/airq.h> 26#include <asm/airq.h>
27#include <asm/isc.h>
27#include <asm/cpu.h> 28#include <asm/cpu.h>
29#include <asm/fcx.h>
28#include "cio.h" 30#include "cio.h"
29#include "css.h" 31#include "css.h"
30#include "chsc.h" 32#include "chsc.h"
@@ -72,7 +74,6 @@ out_unregister:
72 debug_unregister(cio_debug_trace_id); 74 debug_unregister(cio_debug_trace_id);
73 if (cio_debug_crw_id) 75 if (cio_debug_crw_id)
74 debug_unregister(cio_debug_crw_id); 76 debug_unregister(cio_debug_crw_id);
75 printk(KERN_WARNING"cio: could not initialize debugging\n");
76 return -1; 77 return -1;
77} 78}
78 79
@@ -128,7 +129,7 @@ cio_tpi(void)
128 local_bh_disable(); 129 local_bh_disable();
129 irq_enter (); 130 irq_enter ();
130 spin_lock(sch->lock); 131 spin_lock(sch->lock);
131 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); 132 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
132 if (sch->driver && sch->driver->irq) 133 if (sch->driver && sch->driver->irq)
133 sch->driver->irq(sch); 134 sch->driver->irq(sch);
134 spin_unlock(sch->lock); 135 spin_unlock(sch->lock);
@@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
167{ 168{
168 char dbf_txt[15]; 169 char dbf_txt[15];
169 int ccode; 170 int ccode;
170 struct orb *orb; 171 union orb *orb;
171 172
172 CIO_TRACE_EVENT(4, "stIO"); 173 CIO_TRACE_EVENT(4, "stIO");
173 CIO_TRACE_EVENT(4, sch->dev.bus_id); 174 CIO_TRACE_EVENT(4, sch->dev.bus_id);
174 175
175 orb = &to_io_private(sch)->orb; 176 orb = &to_io_private(sch)->orb;
176 /* sch is always under 2G. */ 177 /* sch is always under 2G. */
177 orb->intparm = (u32)(addr_t)sch; 178 orb->cmd.intparm = (u32)(addr_t)sch;
178 orb->fmt = 1; 179 orb->cmd.fmt = 1;
179 180
180 orb->pfch = sch->options.prefetch == 0; 181 orb->cmd.pfch = sch->options.prefetch == 0;
181 orb->spnd = sch->options.suspend; 182 orb->cmd.spnd = sch->options.suspend;
182 orb->ssic = sch->options.suspend && sch->options.inter; 183 orb->cmd.ssic = sch->options.suspend && sch->options.inter;
183 orb->lpm = (lpm != 0) ? lpm : sch->lpm; 184 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
184#ifdef CONFIG_64BIT 185#ifdef CONFIG_64BIT
185 /* 186 /*
186 * for 64 bit we always support 64 bit IDAWs with 4k page size only 187 * for 64 bit we always support 64 bit IDAWs with 4k page size only
187 */ 188 */
188 orb->c64 = 1; 189 orb->cmd.c64 = 1;
189 orb->i2k = 0; 190 orb->cmd.i2k = 0;
190#endif 191#endif
191 orb->key = key >> 4; 192 orb->cmd.key = key >> 4;
192 /* issue "Start Subchannel" */ 193 /* issue "Start Subchannel" */
193 orb->cpa = (__u32) __pa(cpa); 194 orb->cmd.cpa = (__u32) __pa(cpa);
194 ccode = ssch(sch->schid, orb); 195 ccode = ssch(sch->schid, orb);
195 196
196 /* process condition code */ 197 /* process condition code */
@@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
202 /* 203 /*
203 * initialize device status information 204 * initialize device status information
204 */ 205 */
205 sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; 206 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
206 return 0; 207 return 0;
207 case 1: /* status pending */ 208 case 1: /* status pending */
208 case 2: /* busy */ 209 case 2: /* busy */
@@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch)
237 238
238 switch (ccode) { 239 switch (ccode) {
239 case 0: 240 case 0:
240 sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; 241 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
241 return 0; 242 return 0;
242 case 1: 243 case 1:
243 return -EBUSY; 244 return -EBUSY;
@@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch)
277 278
278 switch (ccode) { 279 switch (ccode) {
279 case 0: 280 case 0:
280 sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; 281 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
281 return 0; 282 return 0;
282 case 1: /* status pending */ 283 case 1: /* status pending */
283 case 2: /* busy */ 284 case 2: /* busy */
@@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch)
312 313
313 switch (ccode) { 314 switch (ccode) {
314 case 0: 315 case 0:
315 sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; 316 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
316 return 0; 317 return 0;
317 default: /* device not operational */ 318 default: /* device not operational */
318 return -ENODEV; 319 return -ENODEV;
@@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch)
387 return ret; 388 return ret;
388} 389}
389 390
390/* 391/**
391 * Enable subchannel. 392 * cio_enable_subchannel - enable a subchannel.
393 * @sch: subchannel to be enabled
394 * @intparm: interruption parameter to set
392 */ 395 */
393int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 396int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
394{ 397{
@@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
434 CIO_TRACE_EVENT (2, dbf_txt); 437 CIO_TRACE_EVENT (2, dbf_txt);
435 return ret; 438 return ret;
436} 439}
440EXPORT_SYMBOL_GPL(cio_enable_subchannel);
437 441
438/* 442/**
439 * Disable subchannel. 443 * cio_disable_subchannel - disable a subchannel.
444 * @sch: subchannel to disable
440 */ 445 */
441int 446int cio_disable_subchannel(struct subchannel *sch)
442cio_disable_subchannel (struct subchannel *sch)
443{ 447{
444 char dbf_txt[15]; 448 char dbf_txt[15];
445 int ccode; 449 int ccode;
@@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch)
455 if (ccode == 3) /* Not operational. */ 459 if (ccode == 3) /* Not operational. */
456 return -ENODEV; 460 return -ENODEV;
457 461
458 if (sch->schib.scsw.actl != 0) 462 if (scsw_actl(&sch->schib.scsw) != 0)
459 /* 463 /*
460 * the disable function must not be called while there are 464 * the disable function must not be called while there are
461 * requests pending for completion ! 465 * requests pending for completion !
@@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch)
484 CIO_TRACE_EVENT (2, dbf_txt); 488 CIO_TRACE_EVENT (2, dbf_txt);
485 return ret; 489 return ret;
486} 490}
491EXPORT_SYMBOL_GPL(cio_disable_subchannel);
487 492
488int cio_create_sch_lock(struct subchannel *sch) 493int cio_create_sch_lock(struct subchannel *sch)
489{ 494{
@@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch)
494 return 0; 499 return 0;
495} 500}
496 501
497/* 502static int cio_check_devno_blacklisted(struct subchannel *sch)
498 * cio_validate_subchannel() 503{
504 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
505 /*
506 * This device must not be known to Linux. So we simply
507 * say that there is no device and return ENODEV.
508 */
509 CIO_MSG_EVENT(6, "Blacklisted device detected "
510 "at devno %04X, subchannel set %x\n",
511 sch->schib.pmcw.dev, sch->schid.ssid);
512 return -ENODEV;
513 }
514 return 0;
515}
516
517static int cio_validate_io_subchannel(struct subchannel *sch)
518{
519 /* Initialization for io subchannels. */
520 if (!css_sch_is_valid(&sch->schib))
521 return -ENODEV;
522
523 /* Devno is valid. */
524 return cio_check_devno_blacklisted(sch);
525}
526
527static int cio_validate_msg_subchannel(struct subchannel *sch)
528{
529 /* Initialization for message subchannels. */
530 if (!css_sch_is_valid(&sch->schib))
531 return -ENODEV;
532
533 /* Devno is valid. */
534 return cio_check_devno_blacklisted(sch);
535}
536
537/**
538 * cio_validate_subchannel - basic validation of subchannel
539 * @sch: subchannel structure to be filled out
540 * @schid: subchannel id
499 * 541 *
500 * Find out subchannel type and initialize struct subchannel. 542 * Find out subchannel type and initialize struct subchannel.
501 * Return codes: 543 * Return codes:
502 * SUBCHANNEL_TYPE_IO for a normal io subchannel 544 * 0 on success
503 * SUBCHANNEL_TYPE_CHSC for a chsc subchannel
504 * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
505 * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
506 * -ENXIO for non-defined subchannels 545 * -ENXIO for non-defined subchannels
507 * -ENODEV for subchannels with invalid device number or blacklisted devices 546 * -ENODEV for invalid subchannels or blacklisted devices
547 * -EIO for subchannels in an invalid subchannel set
508 */ 548 */
509int 549int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
510cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
511{ 550{
512 char dbf_txt[15]; 551 char dbf_txt[15];
513 int ccode; 552 int ccode;
514 int err; 553 int err;
515 554
516 sprintf (dbf_txt, "valsch%x", schid.sch_no); 555 sprintf(dbf_txt, "valsch%x", schid.sch_no);
517 CIO_TRACE_EVENT (4, dbf_txt); 556 CIO_TRACE_EVENT(4, dbf_txt);
518 557
519 /* Nuke all fields. */ 558 /* Nuke all fields. */
520 memset(sch, 0, sizeof(struct subchannel)); 559 memset(sch, 0, sizeof(struct subchannel));
@@ -546,67 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
546 /* Copy subchannel type from path management control word. */ 585 /* Copy subchannel type from path management control word. */
547 sch->st = sch->schib.pmcw.st; 586 sch->st = sch->schib.pmcw.st;
548 587
549 /* 588 switch (sch->st) {
550 * ... just being curious we check for non I/O subchannels 589 case SUBCHANNEL_TYPE_IO:
551 */ 590 err = cio_validate_io_subchannel(sch);
552 if (sch->st != 0) { 591 break;
553 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports " 592 case SUBCHANNEL_TYPE_MSG:
554 "non-I/O subchannel type %04X\n", 593 err = cio_validate_msg_subchannel(sch);
555 sch->schid.ssid, sch->schid.sch_no, sch->st); 594 break;
556 /* We stop here for non-io subchannels. */ 595 default:
557 err = sch->st; 596 err = 0;
558 goto out;
559 }
560
561 /* Initialization for io subchannels. */
562 if (!css_sch_is_valid(&sch->schib)) {
563 err = -ENODEV;
564 goto out;
565 } 597 }
566 598 if (err)
567 /* Devno is valid. */
568 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
569 /*
570 * This device must not be known to Linux. So we simply
571 * say that there is no device and return ENODEV.
572 */
573 CIO_MSG_EVENT(6, "Blacklisted device detected "
574 "at devno %04X, subchannel set %x\n",
575 sch->schib.pmcw.dev, sch->schid.ssid);
576 err = -ENODEV;
577 goto out; 599 goto out;
578 }
579 if (cio_is_console(sch->schid)) {
580 sch->opm = 0xff;
581 sch->isc = 1;
582 } else {
583 sch->opm = chp_get_sch_opm(sch);
584 sch->isc = 3;
585 }
586 sch->lpm = sch->schib.pmcw.pam & sch->opm;
587
588 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
589 "- PIM = %02X, PAM = %02X, POM = %02X\n",
590 sch->schib.pmcw.dev, sch->schid.ssid,
591 sch->schid.sch_no, sch->schib.pmcw.pim,
592 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
593 600
594 /* 601 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
595 * We now have to initially ... 602 sch->schid.ssid, sch->schid.sch_no, sch->st);
596 * ... enable "concurrent sense"
597 * ... enable "multipath mode" if more than one
598 * CHPID is available. This is done regardless
599 * whether multiple paths are available for us.
600 */
601 sch->schib.pmcw.csense = 1; /* concurrent sense */
602 sch->schib.pmcw.ena = 0;
603 if ((sch->lpm & (sch->lpm - 1)) != 0)
604 sch->schib.pmcw.mp = 1; /* multipath mode */
605 /* clean up possible residual cmf stuff */
606 sch->schib.pmcw.mme = 0;
607 sch->schib.pmcw.mbfc = 0;
608 sch->schib.pmcw.mbi = 0;
609 sch->schib.mba = 0;
610 return 0; 603 return 0;
611out: 604out:
612 if (!cio_is_console(schid)) 605 if (!cio_is_console(schid))
@@ -647,7 +640,7 @@ do_IRQ (struct pt_regs *regs)
647 */ 640 */
648 if (tpi_info->adapter_IO == 1 && 641 if (tpi_info->adapter_IO == 1 &&
649 tpi_info->int_type == IO_INTERRUPT_TYPE) { 642 tpi_info->int_type == IO_INTERRUPT_TYPE) {
650 do_adapter_IO(); 643 do_adapter_IO(tpi_info->isc);
651 continue; 644 continue;
652 } 645 }
653 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 646 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -706,9 +699,9 @@ void wait_cons_dev(void)
706 if (!console_subchannel_in_use) 699 if (!console_subchannel_in_use)
707 return; 700 return;
708 701
709 /* disable all but isc 1 (console device) */ 702 /* disable all but the console isc */
710 __ctl_store (save_cr6, 6, 6); 703 __ctl_store (save_cr6, 6, 6);
711 cr6 = 0x40000000; 704 cr6 = 1UL << (31 - CONSOLE_ISC);
712 __ctl_load (cr6, 6, 6); 705 __ctl_load (cr6, 6, 6);
713 706
714 do { 707 do {
@@ -716,7 +709,7 @@ void wait_cons_dev(void)
716 if (!cio_tpi()) 709 if (!cio_tpi())
717 cpu_relax(); 710 cpu_relax();
718 spin_lock(console_subchannel.lock); 711 spin_lock(console_subchannel.lock);
719 } while (console_subchannel.schib.scsw.actl != 0); 712 } while (console_subchannel.schib.scsw.cmd.actl != 0);
720 /* 713 /*
721 * restore previous isc value 714 * restore previous isc value
722 */ 715 */
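
The control-register bit is now computed instead of hard-coded: s390 numbers CR bits from the left, so interrupt subclass n corresponds to 1UL << (31 - n). A quick standalone check, assuming CONSOLE_ISC is 1 (the subclass the old code used), that the expression reproduces the 0x40000000 constant it replaces:

#include <assert.h>

#define CONSOLE_ISC 1	/* assumed; the subclass the old code hard-coded */

int main(void)
{
	unsigned long cr6 = 1UL << (31 - CONSOLE_ISC);

	assert(cr6 == 0x40000000UL);	/* the constant this hunk replaces */
	return 0;
}
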
@@ -761,7 +754,6 @@ cio_get_console_sch_no(void)
761 /* unlike in 2.4, we cannot autoprobe here, since 754 /* unlike in 2.4, we cannot autoprobe here, since
762 * the channel subsystem is not fully initialized. 755 * the channel subsystem is not fully initialized.
763 * With some luck, the HWC console can take over */ 756 * With some luck, the HWC console can take over */
764 printk(KERN_WARNING "cio: No ccw console found!\n");
765 return -1; 757 return -1;
766 } 758 }
767 return console_irq; 759 return console_irq;
@@ -778,6 +770,7 @@ cio_probe_console(void)
778 sch_no = cio_get_console_sch_no(); 770 sch_no = cio_get_console_sch_no();
779 if (sch_no == -1) { 771 if (sch_no == -1) {
780 console_subchannel_in_use = 0; 772 console_subchannel_in_use = 0;
773 printk(KERN_WARNING "cio: No ccw console found!\n");
781 return ERR_PTR(-ENODEV); 774 return ERR_PTR(-ENODEV);
782 } 775 }
783 memset(&console_subchannel, 0, sizeof(struct subchannel)); 776 memset(&console_subchannel, 0, sizeof(struct subchannel));
@@ -790,15 +783,15 @@ cio_probe_console(void)
790 } 783 }
791 784
792 /* 785 /*
793 * enable console I/O-interrupt subclass 1 786 * enable console I/O-interrupt subclass
794 */ 787 */
795 ctl_set_bit(6, 30); 788 isc_register(CONSOLE_ISC);
796 console_subchannel.isc = 1; 789 console_subchannel.schib.pmcw.isc = CONSOLE_ISC;
797 console_subchannel.schib.pmcw.isc = 1;
798 console_subchannel.schib.pmcw.intparm = 790 console_subchannel.schib.pmcw.intparm =
799 (u32)(addr_t)&console_subchannel; 791 (u32)(addr_t)&console_subchannel;
800 ret = cio_modify(&console_subchannel); 792 ret = cio_modify(&console_subchannel);
801 if (ret) { 793 if (ret) {
794 isc_unregister(CONSOLE_ISC);
802 console_subchannel_in_use = 0; 795 console_subchannel_in_use = 0;
803 return ERR_PTR(ret); 796 return ERR_PTR(ret);
804 } 797 }
@@ -810,7 +803,7 @@ cio_release_console(void)
810{ 803{
811 console_subchannel.schib.pmcw.intparm = 0; 804 console_subchannel.schib.pmcw.intparm = 0;
812 cio_modify(&console_subchannel); 805 cio_modify(&console_subchannel);
813 ctl_clear_bit(6, 24); 806 isc_unregister(CONSOLE_ISC);
814 console_subchannel_in_use = 0; 807 console_subchannel_in_use = 0;
815} 808}
816 809
@@ -864,7 +857,7 @@ static void udelay_reset(unsigned long usecs)
864} 857}
865 858
866static int 859static int
867__clear_subchannel_easy(struct subchannel_id schid) 860__clear_io_subchannel_easy(struct subchannel_id schid)
868{ 861{
869 int retry; 862 int retry;
870 863
@@ -883,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
883 return -EBUSY; 876 return -EBUSY;
884} 877}
885 878
879static void __clear_chsc_subchannel_easy(void)
880{
881 /* It seems we can only wait for a bit here :/ */
882 udelay_reset(100);
883}
884
886static int pgm_check_occured; 885static int pgm_check_occured;
887 886
888static void cio_reset_pgm_check_handler(void) 887static void cio_reset_pgm_check_handler(void)
@@ -921,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
921 case -ENODEV: 920 case -ENODEV:
922 break; 921 break;
923 default: /* -EBUSY */ 922 default: /* -EBUSY */
924 if (__clear_subchannel_easy(schid)) 923 switch (schib.pmcw.st) {
925 break; /* give up... */ 924 case SUBCHANNEL_TYPE_IO:
925 if (__clear_io_subchannel_easy(schid))
926 goto out; /* give up... */
927 break;
928 case SUBCHANNEL_TYPE_CHSC:
929 __clear_chsc_subchannel_easy();
930 break;
931 default:
932 /* No default clear strategy */
933 break;
934 }
926 stsch(schid, &schib); 935 stsch(schid, &schib);
927 __disable_subchannel_easy(schid, &schib); 936 __disable_subchannel_easy(schid, &schib);
928 } 937 }
938out:
929 return 0; 939 return 0;
930} 940}
931 941
@@ -1068,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1068 iplinfo->is_qdio = schib.pmcw.qf; 1078 iplinfo->is_qdio = schib.pmcw.qf;
1069 return 0; 1079 return 0;
1070} 1080}
1081
1082/**
1083 * cio_tm_start_key - perform start function
1084 * @sch: subchannel on which to perform the start function
1085 * @tcw: transport-command word to be started
1086 * @lpm: mask of paths to use
1087 * @key: storage key to use for storage access
1088 *
1089 * Start the tcw on the given subchannel. Return zero on success, non-zero
1090 * otherwise.
1091 */
1092int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
1093{
1094 int cc;
1095 union orb *orb = &to_io_private(sch)->orb;
1096
1097 memset(orb, 0, sizeof(union orb));
1098 orb->tm.intparm = (u32) (addr_t) sch;
1099 orb->tm.key = key >> 4;
1100 orb->tm.b = 1;
1101 orb->tm.lpm = lpm ? lpm : sch->lpm;
1102 orb->tm.tcw = (u32) (addr_t) tcw;
1103 cc = ssch(sch->schid, orb);
1104 switch (cc) {
1105 case 0:
1106 return 0;
1107 case 1:
1108 case 2:
1109 return -EBUSY;
1110 default:
1111 return cio_start_handle_notoper(sch, lpm);
1112 }
1113}
1114
1115/**
1116 * cio_tm_intrg - perform interrogate function
1117 * @sch: subchannel on which to perform the interrogate function
1118 *
1119 * If the specified subchannel is running in transport-mode, perform the
1120 * interrogate function. Return zero on success, non-zero otherwise.
1121 */
1122int cio_tm_intrg(struct subchannel *sch)
1123{
1124 int cc;
1125
1126 if (!to_io_private(sch)->orb.tm.b)
1127 return -EINVAL;
1128 cc = xsch(sch->schid);
1129 switch (cc) {
1130 case 0:
1131 case 2:
1132 return 0;
1133 case 1:
1134 return -EBUSY;
1135 default:
1136 return -ENODEV;
1137 }
1138}
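
cio_tm_start_key() maps condition codes 1 and 2 to -EBUSY and leaves retry policy to its caller. A hedged kernel-side sketch of such a caller; the retry count and back-off are invented for illustration and udelay() is assumed from <linux/delay.h>:

#include <linux/delay.h>	/* udelay(), assumed available here */
#include "cio.h"		/* cio_tm_start_key(), struct subchannel */

/* Hypothetical caller: retry a transport-mode start while busy. */
static int tm_start_retrying(struct subchannel *sch, struct tcw *tcw,
			     u8 lpm, u8 key)
{
	int retries = 5;	/* invented retry limit */
	int ret;

	do {
		ret = cio_tm_start_key(sch, tcw, lpm, key);
		if (ret != -EBUSY)
			return ret;
		udelay(100);	/* invented back-off */
	} while (--retries);
	return -EBUSY;
}
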
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 6e933aebe013..3b236d20e835 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -3,9 +3,12 @@
3 3
4#include <linux/mutex.h> 4#include <linux/mutex.h>
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/mod_devicetable.h>
6#include <asm/chpid.h> 7#include <asm/chpid.h>
8#include <asm/cio.h>
9#include <asm/fcx.h>
10#include <asm/schid.h>
7#include "chsc.h" 11#include "chsc.h"
8#include "schid.h"
9 12
10/* 13/*
11 * path management control word 14 * path management control word
@@ -13,7 +16,7 @@
13struct pmcw { 16struct pmcw {
14 u32 intparm; /* interruption parameter */ 17 u32 intparm; /* interruption parameter */
15 u32 qf : 1; /* qdio facility */ 18 u32 qf : 1; /* qdio facility */
16 u32 res0 : 1; /* reserved zeros */ 19 u32 w : 1;
17 u32 isc : 3; /* interruption subclass */ 20 u32 isc : 3; /* interruption subclass */
18 u32 res5 : 3; /* reserved zeros */ 21 u32 res5 : 3; /* reserved zeros */
19 u32 ena : 1; /* enabled */ 22 u32 ena : 1; /* enabled */
@@ -47,7 +50,7 @@ struct pmcw {
47 */ 50 */
48struct schib { 51struct schib {
49 struct pmcw pmcw; /* path management control word */ 52 struct pmcw pmcw; /* path management control word */
50 struct scsw scsw; /* subchannel status word */ 53 union scsw scsw; /* subchannel status word */
51 __u64 mba; /* measurement block address */ 54 __u64 mba; /* measurement block address */
52 __u8 mda[4]; /* model dependent area */ 55 __u8 mda[4]; /* model dependent area */
53} __attribute__ ((packed,aligned(4))); 56} __attribute__ ((packed,aligned(4)));
@@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int);
99extern int cio_get_options (struct subchannel *); 102extern int cio_get_options (struct subchannel *);
100extern int cio_modify (struct subchannel *); 103extern int cio_modify (struct subchannel *);
101 104
105int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
106int cio_tm_intrg(struct subchannel *sch);
107
102int cio_create_sch_lock(struct subchannel *); 108int cio_create_sch_lock(struct subchannel *);
103void do_adapter_IO(void); 109void do_adapter_IO(u8 isc);
104void do_IRQ(struct pt_regs *); 110void do_IRQ(struct pt_regs *);
105 111
106/* Use with care. */ 112/* Use with care. */
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 2808b6833b9e..a90b28c0be57 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev)
341 if (stsch(sch->schid, &sch->schib)) 341 if (stsch(sch->schid, &sch->schib))
342 return -ENODEV; 342 return -ENODEV;
343 343
344 if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { 344 if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
345 /* Don't copy if a start function is in progress. */ 345 /* Don't copy if a start function is in progress. */
346 if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) && 346 if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
347 (sch->schib.scsw.actl & 347 (scsw_actl(&sch->schib.scsw) &
348 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && 348 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
349 (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))) 349 (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
350 return -EBUSY; 350 return -EBUSY;
351 } 351 }
352 cmb_data = cdev->private->cmb; 352 cmb_data = cdev->private->cmb;
@@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev)
612 free_pages((unsigned long)mem, get_order(size)); 612 free_pages((unsigned long)mem, get_order(size));
613 } else if (!mem) { 613 } else if (!mem) {
614 /* no luck */ 614 /* no luck */
615 printk(KERN_WARNING "cio: failed to allocate area "
616 "for measuring %d subchannels\n",
617 cmb_area.num_channels);
618 ret = -ENOMEM; 615 ret = -ENOMEM;
619 goto out; 616 goto out;
620 } else { 617 } else {
@@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev,
1230 switch (val) { 1227 switch (val) {
1231 case 0: 1228 case 0:
1232 ret = disable_cmf(cdev); 1229 ret = disable_cmf(cdev);
1233 if (ret)
1234 dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
1235 break; 1230 break;
1236 case 1: 1231 case 1:
1237 ret = enable_cmf(cdev); 1232 ret = enable_cmf(cdev);
1238 if (ret && ret != -EBUSY)
1239 dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
1240 break; 1233 break;
1241 } 1234 }
1242 1235
@@ -1344,8 +1337,7 @@ static int __init init_cmf(void)
1344 * to basic mode. 1337 * to basic mode.
1345 */ 1338 */
1346 if (format == CMF_AUTODETECT) { 1339 if (format == CMF_AUTODETECT) {
1347 if (!css_characteristics_avail || 1340 if (!css_general_characteristics.ext_mb) {
1348 !css_general_characteristics.ext_mb) {
1349 format = CMF_BASIC; 1341 format = CMF_BASIC;
1350 } else { 1342 } else {
1351 format = CMF_EXTENDED; 1343 format = CMF_EXTENDED;
@@ -1365,8 +1357,6 @@ static int __init init_cmf(void)
1365 cmbops = &cmbops_extended; 1357 cmbops = &cmbops_extended;
1366 break; 1358 break;
1367 default: 1359 default:
1368 printk(KERN_ERR "cio: Invalid format %d for channel "
1369 "measurement facility\n", format);
1370 return 1; 1360 return 1;
1371 } 1361 }
1372 1362
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a76956512b2d..46c021d880dc 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/css.c 2 * drivers/s390/cio/css.c
3 * driver for channel subsystem 3 * driver for channel subsystem
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 */ 8 */
@@ -14,7 +13,9 @@
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/list.h> 14#include <linux/list.h>
16#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <asm/isc.h>
17 17
18#include "../s390mach.h"
18#include "css.h" 19#include "css.h"
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -30,8 +31,6 @@ static int max_ssid = 0;
30 31
31struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; 32struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
32 33
33int css_characteristics_avail = 0;
34
35int 34int
36for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 35for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
37{ 36{
@@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid)
121 kfree(sch); 120 kfree(sch);
122 return ERR_PTR(ret); 121 return ERR_PTR(ret);
123 } 122 }
124
125 if (sch->st != SUBCHANNEL_TYPE_IO) {
126 /* For now we ignore all non-io subchannels. */
127 kfree(sch);
128 return ERR_PTR(-EINVAL);
129 }
130
131 /*
132 * Set intparm to subchannel address.
133 * This is fine even on 64bit since the subchannel is always located
134 * under 2G.
135 */
136 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
137 ret = cio_modify(sch);
138 if (ret) {
139 kfree(sch->lock);
140 kfree(sch);
141 return ERR_PTR(ret);
142 }
143 return sch; 123 return sch;
144} 124}
145 125
@@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch)
177 return ret; 157 return ret;
178} 158}
179 159
160/**
161 * css_sch_device_unregister - unregister a subchannel
162 * @sch: subchannel to be unregistered
163 */
180void css_sch_device_unregister(struct subchannel *sch) 164void css_sch_device_unregister(struct subchannel *sch)
181{ 165{
182 mutex_lock(&sch->reg_mutex); 166 mutex_lock(&sch->reg_mutex);
183 device_unregister(&sch->dev); 167 if (device_is_registered(&sch->dev))
168 device_unregister(&sch->dev);
184 mutex_unlock(&sch->reg_mutex); 169 mutex_unlock(&sch->reg_mutex);
185} 170}
171EXPORT_SYMBOL_GPL(css_sch_device_unregister);
186 172
187static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 173static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
188{ 174{
@@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch)
229 } 215 }
230} 216}
231 217
218static ssize_t type_show(struct device *dev, struct device_attribute *attr,
219 char *buf)
220{
221 struct subchannel *sch = to_subchannel(dev);
222
223 return sprintf(buf, "%01x\n", sch->st);
224}
225
226static DEVICE_ATTR(type, 0444, type_show, NULL);
227
228static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
229 char *buf)
230{
231 struct subchannel *sch = to_subchannel(dev);
232
233 return sprintf(buf, "css:t%01X\n", sch->st);
234}
235
236static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
237
238static struct attribute *subch_attrs[] = {
239 &dev_attr_type.attr,
240 &dev_attr_modalias.attr,
241 NULL,
242};
243
244static struct attribute_group subch_attr_group = {
245 .attrs = subch_attrs,
246};
247
248static struct attribute_group *default_subch_attr_groups[] = {
249 &subch_attr_group,
250 NULL,
251};
252
232static int css_register_subchannel(struct subchannel *sch) 253static int css_register_subchannel(struct subchannel *sch)
233{ 254{
234 int ret; 255 int ret;
@@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch)
237 sch->dev.parent = &channel_subsystems[0]->device; 258 sch->dev.parent = &channel_subsystems[0]->device;
238 sch->dev.bus = &css_bus_type; 259 sch->dev.bus = &css_bus_type;
239 sch->dev.release = &css_subchannel_release; 260 sch->dev.release = &css_subchannel_release;
240 sch->dev.groups = subch_attr_groups; 261 sch->dev.groups = default_subch_attr_groups;
241 /* 262 /*
242 * We don't want to generate uevents for I/O subchannels that don't 263 * We don't want to generate uevents for I/O subchannels that don't
243 * have a working ccw device behind them since they will be 264 * have a working ccw device behind them since they will be
244 * unregistered before they can be used anyway, so we delay the add 265 * unregistered before they can be used anyway, so we delay the add
245 * uevent until after device recognition was successful. 266 * uevent until after device recognition was successful.
267 * Note that we suppress the uevent for all subchannel types;
268 * the subchannel driver can decide itself when it wants to inform
269 * userspace of its existence.
246 */ 270 */
247 if (!cio_is_console(sch->schid)) 271 sch->dev.uevent_suppress = 1;
248 /* Console is special, no need to suppress. */
249 sch->dev.uevent_suppress = 1;
250 css_update_ssd_info(sch); 272 css_update_ssd_info(sch);
251 /* make it known to the system */ 273 /* make it known to the system */
252 ret = css_sch_device_register(sch); 274 ret = css_sch_device_register(sch);
@@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch)
255 sch->schid.ssid, sch->schid.sch_no, ret); 277 sch->schid.ssid, sch->schid.sch_no, ret);
256 return ret; 278 return ret;
257 } 279 }
280 if (!sch->driver) {
281 /*
282 * No driver matched. Generate the uevent now so that
283 * a fitting driver module may be loaded based on the
284 * modalias.
285 */
286 sch->dev.uevent_suppress = 0;
287 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
288 }
258 return ret; 289 return ret;
259} 290}
260 291
261static int css_probe_device(struct subchannel_id schid) 292int css_probe_device(struct subchannel_id schid)
262{ 293{
263 int ret; 294 int ret;
264 struct subchannel *sch; 295 struct subchannel *sch;
@@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib)
301{ 332{
302 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) 333 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
303 return 0; 334 return 0;
335 if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
336 return 0;
304 return 1; 337 return 1;
305} 338}
306EXPORT_SYMBOL_GPL(css_sch_is_valid); 339EXPORT_SYMBOL_GPL(css_sch_is_valid);
307 340
308static int css_get_subchannel_status(struct subchannel *sch)
309{
310 struct schib schib;
311
312 if (stsch(sch->schid, &schib))
313 return CIO_GONE;
314 if (!css_sch_is_valid(&schib))
315 return CIO_GONE;
316 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
317 return CIO_REVALIDATE;
318 if (!sch->lpm)
319 return CIO_NO_PATH;
320 return CIO_OPER;
321}
322
323static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
324{
325 int event, ret, disc;
326 unsigned long flags;
327 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
328
329 spin_lock_irqsave(sch->lock, flags);
330 disc = device_is_disconnected(sch);
331 if (disc && slow) {
332 /* Disconnected devices are evaluated directly only.*/
333 spin_unlock_irqrestore(sch->lock, flags);
334 return 0;
335 }
336 /* No interrupt after machine check - kill pending timers. */
337 device_kill_pending_timer(sch);
338 if (!disc && !slow) {
339 /* Non-disconnected devices are evaluated on the slow path. */
340 spin_unlock_irqrestore(sch->lock, flags);
341 return -EAGAIN;
342 }
343 event = css_get_subchannel_status(sch);
344 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
345 sch->schid.ssid, sch->schid.sch_no, event,
346 disc ? "disconnected" : "normal",
347 slow ? "slow" : "fast");
348 /* Analyze subchannel status. */
349 action = NONE;
350 switch (event) {
351 case CIO_NO_PATH:
352 if (disc) {
353 /* Check if paths have become available. */
354 action = REPROBE;
355 break;
356 }
357 /* fall through */
358 case CIO_GONE:
359 /* Prevent unwanted effects when opening lock. */
360 cio_disable_subchannel(sch);
361 device_set_disconnected(sch);
362 /* Ask driver what to do with device. */
363 action = UNREGISTER;
364 if (sch->driver && sch->driver->notify) {
365 spin_unlock_irqrestore(sch->lock, flags);
366 ret = sch->driver->notify(sch, event);
367 spin_lock_irqsave(sch->lock, flags);
368 if (ret)
369 action = NONE;
370 }
371 break;
372 case CIO_REVALIDATE:
373 /* Device will be removed, so no notify necessary. */
374 if (disc)
375 /* Reprobe because immediate unregister might block. */
376 action = REPROBE;
377 else
378 action = UNREGISTER_PROBE;
379 break;
380 case CIO_OPER:
381 if (disc)
382 /* Get device operational again. */
383 action = REPROBE;
384 break;
385 }
386 /* Perform action. */
387 ret = 0;
388 switch (action) {
389 case UNREGISTER:
390 case UNREGISTER_PROBE:
391 /* Unregister device (will use subchannel lock). */
392 spin_unlock_irqrestore(sch->lock, flags);
393 css_sch_device_unregister(sch);
394 spin_lock_irqsave(sch->lock, flags);
395
396 /* Reset intparm to zeroes. */
397 sch->schib.pmcw.intparm = 0;
398 cio_modify(sch);
399 break;
400 case REPROBE:
401 device_trigger_reprobe(sch);
402 break;
403 default:
404 break;
405 }
406 spin_unlock_irqrestore(sch->lock, flags);
407 /* Probe if necessary. */
408 if (action == UNREGISTER_PROBE)
409 ret = css_probe_device(sch->schid);
410
411 return ret;
412}
413
414static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) 341static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
415{ 342{
416 struct schib schib; 343 struct schib schib;
@@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
429 return css_probe_device(schid); 356 return css_probe_device(schid);
430} 357}
431 358
359static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
360{
361 int ret = 0;
362
363 if (sch->driver) {
364 if (sch->driver->sch_event)
365 ret = sch->driver->sch_event(sch, slow);
366 else
367 dev_dbg(&sch->dev,
368 "Got subchannel machine check but "
369 "no sch_event handler provided.\n");
370 }
371 return ret;
372}
373
432static void css_evaluate_subchannel(struct subchannel_id schid, int slow) 374static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
433{ 375{
434 struct subchannel *sch; 376 struct subchannel *sch;
@@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
596/* 538/*
597 * Called from the machine check handler for subchannel report words. 539 * Called from the machine check handler for subchannel report words.
598 */ 540 */
599void css_process_crw(int rsid1, int rsid2) 541static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
600{ 542{
601 struct subchannel_id mchk_schid; 543 struct subchannel_id mchk_schid;
602 544
603 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", 545 if (overflow) {
604 rsid1, rsid2); 546 css_schedule_eval_all();
547 return;
548 }
549 CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
550 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
551 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
552 crw0->erc, crw0->rsid);
553 if (crw1)
554 CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
555 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
556 crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
557 crw1->anc, crw1->erc, crw1->rsid);
605 init_subchannel_id(&mchk_schid); 558 init_subchannel_id(&mchk_schid);
606 mchk_schid.sch_no = rsid1; 559 mchk_schid.sch_no = crw0->rsid;
607 if (rsid2 != 0) 560 if (crw1)
608 mchk_schid.ssid = (rsid2 >> 8) & 3; 561 mchk_schid.ssid = (crw1->rsid >> 8) & 3;
609 562
610 /* 563 /*
611 * Since we are always presented with IPI in the CRW, we have to 564 * Since we are always presented with IPI in the CRW, we have to
612 * use stsch() to find out if the subchannel in question has come 565 * use stsch() to find out if the subchannel in question has come
613 * or gone. 566 * or gone.
@@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
658static void __init 611static void __init
659css_generate_pgid(struct channel_subsystem *css, u32 tod_high) 612css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
660{ 613{
661 if (css_characteristics_avail && css_general_characteristics.mcss) { 614 if (css_general_characteristics.mcss) {
662 css->global_pgid.pgid_high.ext_cssid.version = 0x80; 615 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
663 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 616 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
664 } else { 617 } else {
@@ -795,8 +748,6 @@ init_channel_subsystem (void)
795 ret = chsc_determine_css_characteristics(); 748 ret = chsc_determine_css_characteristics();
796 if (ret == -ENOMEM) 749 if (ret == -ENOMEM)
797 goto out; /* No need to continue. */ 750 goto out; /* No need to continue. */
798 if (ret == 0)
799 css_characteristics_avail = 1;
800 751
801 ret = chsc_alloc_sei_area(); 752 ret = chsc_alloc_sei_area();
802 if (ret) 753 if (ret)
@@ -806,6 +757,10 @@ init_channel_subsystem (void)
806 if (ret) 757 if (ret)
807 goto out; 758 goto out;
808 759
760 ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
761 if (ret)
762 goto out;
763
809 if ((ret = bus_register(&css_bus_type))) 764 if ((ret = bus_register(&css_bus_type)))
810 goto out; 765 goto out;
811 766
@@ -836,8 +791,7 @@ init_channel_subsystem (void)
836 ret = device_register(&css->device); 791 ret = device_register(&css->device);
837 if (ret) 792 if (ret)
838 goto out_free_all; 793 goto out_free_all;
839 if (css_characteristics_avail && 794 if (css_chsc_characteristics.secm) {
840 css_chsc_characteristics.secm) {
841 ret = device_create_file(&css->device, 795 ret = device_create_file(&css->device,
842 &dev_attr_cm_enable); 796 &dev_attr_cm_enable);
843 if (ret) 797 if (ret)
@@ -852,7 +806,8 @@ init_channel_subsystem (void)
852 goto out_pseudo; 806 goto out_pseudo;
853 css_init_done = 1; 807 css_init_done = 1;
854 808
855 ctl_set_bit(6, 28); 809 /* Enable default isc for I/O subchannels. */
810 isc_register(IO_SCH_ISC);
856 811
857 for_each_subchannel(__init_channel_subsystem, NULL); 812 for_each_subchannel(__init_channel_subsystem, NULL);
858 return 0; 813 return 0;
@@ -875,7 +830,7 @@ out_unregister:
875 i--; 830 i--;
876 css = channel_subsystems[i]; 831 css = channel_subsystems[i];
877 device_unregister(&css->pseudo_subchannel->dev); 832 device_unregister(&css->pseudo_subchannel->dev);
878 if (css_characteristics_avail && css_chsc_characteristics.secm) 833 if (css_chsc_characteristics.secm)
879 device_remove_file(&css->device, 834 device_remove_file(&css->device,
880 &dev_attr_cm_enable); 835 &dev_attr_cm_enable);
881 device_unregister(&css->device); 836 device_unregister(&css->device);
@@ -883,6 +838,7 @@ out_unregister:
883out_bus: 838out_bus:
884 bus_unregister(&css_bus_type); 839 bus_unregister(&css_bus_type);
885out: 840out:
841 s390_unregister_crw_handler(CRW_RSC_CSS);
886 chsc_free_sei_area(); 842 chsc_free_sei_area();
887 kfree(slow_subchannel_set); 843 kfree(slow_subchannel_set);
888 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", 844 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
@@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch)
895 return sch == to_css(sch->dev.parent)->pseudo_subchannel; 851 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
896} 852}
897 853
898/* 854static int css_bus_match(struct device *dev, struct device_driver *drv)
899 * find a driver for a subchannel. They identify by the subchannel
900 * type with the exception that the console subchannel driver has its own
901 * subchannel type although the device is an i/o subchannel
902 */
903static int
904css_bus_match (struct device *dev, struct device_driver *drv)
905{ 855{
906 struct subchannel *sch = to_subchannel(dev); 856 struct subchannel *sch = to_subchannel(dev);
907 struct css_driver *driver = to_cssdriver(drv); 857 struct css_driver *driver = to_cssdriver(drv);
858 struct css_device_id *id;
908 859
909 if (sch->st == driver->subchannel_type) 860 for (id = driver->subchannel_type; id->match_flags; id++) {
910 return 1; 861 if (sch->st == id->type)
862 return 1;
863 }
911 864
912 return 0; 865 return 0;
913} 866}
@@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev)
945 sch->driver->shutdown(sch); 898 sch->driver->shutdown(sch);
946} 899}
947 900
901static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
902{
903 struct subchannel *sch = to_subchannel(dev);
904 int ret;
905
906 ret = add_uevent_var(env, "ST=%01X", sch->st);
907 if (ret)
908 return ret;
909 ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
910 return ret;
911}
912
948struct bus_type css_bus_type = { 913struct bus_type css_bus_type = {
949 .name = "css", 914 .name = "css",
950 .match = css_bus_match, 915 .match = css_bus_match,
951 .probe = css_probe, 916 .probe = css_probe,
952 .remove = css_remove, 917 .remove = css_remove,
953 .shutdown = css_shutdown, 918 .shutdown = css_shutdown,
919 .uevent = css_uevent,
954}; 920};
955 921
956/** 922/**
@@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem);
985 951
986MODULE_LICENSE("GPL"); 952MODULE_LICENSE("GPL");
987EXPORT_SYMBOL(css_bus_type); 953EXPORT_SYMBOL(css_bus_type);
988EXPORT_SYMBOL_GPL(css_characteristics_avail);
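
With css_uevent() in place every subchannel advertises a MODALIAS of the form css:t<type>, so udev can autoload a matching driver module. A hedged sketch of the id table such a driver would declare, mirroring the io_subchannel_ids table in the device.c hunk below (SUBCHANNEL_TYPE_CHSC is the constant mentioned in the old cio.c comments; its value is assumed here):

/* Hypothetical driver-side table for CHSC subchannels. */
static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
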
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index e1913518f354..57ebf120f825 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -9,8 +9,7 @@
9 9
10#include <asm/cio.h> 10#include <asm/cio.h>
11#include <asm/chpid.h> 11#include <asm/chpid.h>
12 12#include <asm/schid.h>
13#include "schid.h"
14 13
15/* 14/*
16 * path grouping stuff 15 * path grouping stuff
@@ -58,20 +57,28 @@ struct pgid {
58 __u32 tod_high; /* high word TOD clock */ 57 __u32 tod_high; /* high word TOD clock */
59} __attribute__ ((packed)); 58} __attribute__ ((packed));
60 59
61/*
62 * A css driver handles all subchannels of one type.
63 * Currently, we only care about I/O subchannels (type 0), these
64 * have a ccw_device connected to them.
65 */
66struct subchannel; 60struct subchannel;
61struct chp_link;
62/**
63 * struct css_driver - device driver for subchannels
64 * @owner: owning module
65 * @subchannel_type: subchannel type supported by this driver
66 * @drv: embedded device driver structure
67 * @irq: called on interrupts
68 * @chp_event: called for events affecting a channel path
69 * @sch_event: called for events affecting the subchannel
70 * @probe: function called on probe
71 * @remove: function called on remove
72 * @shutdown: called at device shutdown
73 * @name: name of the device driver
74 */
67struct css_driver { 75struct css_driver {
68 struct module *owner; 76 struct module *owner;
69 unsigned int subchannel_type; 77 struct css_device_id *subchannel_type;
70 struct device_driver drv; 78 struct device_driver drv;
71 void (*irq)(struct subchannel *); 79 void (*irq)(struct subchannel *);
72 int (*notify)(struct subchannel *, int); 80 int (*chp_event)(struct subchannel *, struct chp_link *, int);
73 void (*verify)(struct subchannel *); 81 int (*sch_event)(struct subchannel *, int);
74 void (*termination)(struct subchannel *);
75 int (*probe)(struct subchannel *); 82 int (*probe)(struct subchannel *);
76 int (*remove)(struct subchannel *); 83 int (*remove)(struct subchannel *);
77 void (*shutdown)(struct subchannel *); 84 void (*shutdown)(struct subchannel *);
@@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *);
89extern void css_driver_unregister(struct css_driver *); 96extern void css_driver_unregister(struct css_driver *);
90 97
91extern void css_sch_device_unregister(struct subchannel *); 98extern void css_sch_device_unregister(struct subchannel *);
92extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 99extern int css_probe_device(struct subchannel_id);
100extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
93extern int css_init_done; 101extern int css_init_done;
94int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), 102int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
95 int (*fn_unknown)(struct subchannel_id, 103 int (*fn_unknown)(struct subchannel_id,
96 void *), void *data); 104 void *), void *data);
97extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 105extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
98extern void css_process_crw(int, int);
99extern void css_reiterate_subchannels(void); 106extern void css_reiterate_subchannels(void);
100void css_update_ssd_info(struct subchannel *sch); 107void css_update_ssd_info(struct subchannel *sch);
101 108
@@ -121,20 +128,6 @@ struct channel_subsystem {
121extern struct bus_type css_bus_type; 128extern struct bus_type css_bus_type;
122extern struct channel_subsystem *channel_subsystems[]; 129extern struct channel_subsystem *channel_subsystems[];
123 130
124/* Some helper functions for disconnected state. */
125int device_is_disconnected(struct subchannel *);
126void device_set_disconnected(struct subchannel *);
127void device_trigger_reprobe(struct subchannel *);
128
129/* Helper functions for vary on/off. */
130int device_is_online(struct subchannel *);
131void device_kill_io(struct subchannel *);
132void device_set_intretry(struct subchannel *sch);
133int device_trigger_verify(struct subchannel *sch);
134
135/* Machine check helper function. */
136void device_kill_pending_timer(struct subchannel *);
137
138/* Helper functions to build lists for the slow path. */ 131/* Helper functions to build lists for the slow path. */
139void css_schedule_eval(struct subchannel_id schid); 132void css_schedule_eval(struct subchannel_id schid);
140void css_schedule_eval_all(void); 133void css_schedule_eval_all(void);
@@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *);
145 138
146extern struct workqueue_struct *slow_path_wq; 139extern struct workqueue_struct *slow_path_wq;
147void css_wait_for_slow_path(void); 140void css_wait_for_slow_path(void);
148
149extern struct attribute_group *subch_attr_groups[];
150#endif 141#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e22813db74a2..e818d0c54c09 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/device.c 2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices 3 * bus driver for ccw devices
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
@@ -23,7 +22,9 @@
23#include <asm/cio.h> 22#include <asm/cio.h>
24#include <asm/param.h> /* HZ */ 23#include <asm/param.h> /* HZ */
25#include <asm/cmb.h> 24#include <asm/cmb.h>
25#include <asm/isc.h>
26 26
27#include "chp.h"
27#include "cio.h" 28#include "cio.h"
28#include "cio_debug.h" 29#include "cio_debug.h"
29#include "css.h" 30#include "css.h"
@@ -125,19 +126,24 @@ struct bus_type ccw_bus_type;
125static void io_subchannel_irq(struct subchannel *); 126static void io_subchannel_irq(struct subchannel *);
126static int io_subchannel_probe(struct subchannel *); 127static int io_subchannel_probe(struct subchannel *);
127static int io_subchannel_remove(struct subchannel *); 128static int io_subchannel_remove(struct subchannel *);
128static int io_subchannel_notify(struct subchannel *, int);
129static void io_subchannel_verify(struct subchannel *);
130static void io_subchannel_ioterm(struct subchannel *);
131static void io_subchannel_shutdown(struct subchannel *); 129static void io_subchannel_shutdown(struct subchannel *);
130static int io_subchannel_sch_event(struct subchannel *, int);
131static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
132 int);
133
134static struct css_device_id io_subchannel_ids[] = {
135 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
136 { /* end of list */ },
137};
138MODULE_DEVICE_TABLE(css, io_subchannel_ids);
132 139
133static struct css_driver io_subchannel_driver = { 140static struct css_driver io_subchannel_driver = {
134 .owner = THIS_MODULE, 141 .owner = THIS_MODULE,
135 .subchannel_type = SUBCHANNEL_TYPE_IO, 142 .subchannel_type = io_subchannel_ids,
136 .name = "io_subchannel", 143 .name = "io_subchannel",
137 .irq = io_subchannel_irq, 144 .irq = io_subchannel_irq,
138 .notify = io_subchannel_notify, 145 .sch_event = io_subchannel_sch_event,
139 .verify = io_subchannel_verify, 146 .chp_event = io_subchannel_chp_event,
140 .termination = io_subchannel_ioterm,
141 .probe = io_subchannel_probe, 147 .probe = io_subchannel_probe,
142 .remove = io_subchannel_remove, 148 .remove = io_subchannel_remove,
143 .shutdown = io_subchannel_shutdown, 149 .shutdown = io_subchannel_shutdown,
@@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
487 ccw_device_set_online(cdev); 493 ccw_device_set_online(cdev);
488 return 0; 494 return 0;
489} 495}
490static void online_store_handle_online(struct ccw_device *cdev, int force) 496static int online_store_handle_online(struct ccw_device *cdev, int force)
491{ 497{
492 int ret; 498 int ret;
493 499
494 ret = online_store_recog_and_online(cdev); 500 ret = online_store_recog_and_online(cdev);
495 if (ret) 501 if (ret)
496 return; 502 return ret;
497 if (force && cdev->private->state == DEV_STATE_BOXED) { 503 if (force && cdev->private->state == DEV_STATE_BOXED) {
498 ret = ccw_device_stlck(cdev); 504 ret = ccw_device_stlck(cdev);
499 if (ret) { 505 if (ret)
500 dev_warn(&cdev->dev, 506 return ret;
501 "ccw_device_stlck returned %d!\n", ret);
502 return;
503 }
504 if (cdev->id.cu_type == 0) 507 if (cdev->id.cu_type == 0)
505 cdev->private->state = DEV_STATE_NOT_OPER; 508 cdev->private->state = DEV_STATE_NOT_OPER;
506 online_store_recog_and_online(cdev); 509 online_store_recog_and_online(cdev);
507 } 510 }
508 511 return 0;
509} 512}
510 513
511static ssize_t online_store (struct device *dev, struct device_attribute *attr, 514static ssize_t online_store (struct device *dev, struct device_attribute *attr,
@@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
538 ret = count; 541 ret = count;
539 break; 542 break;
540 case 1: 543 case 1:
541 online_store_handle_online(cdev, force); 544 ret = online_store_handle_online(cdev, force);
542 ret = count; 545 if (!ret)
546 ret = count;
543 break; 547 break;
544 default: 548 default:
545 ret = -EINVAL; 549 ret = -EINVAL;
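
With this change a failed forced online (for instance a failing ccw_device_stlck()) is no longer swallowed with a console warning; the -errno propagates out of the store routine, so a process writing to the online attribute sees the failure from write(2). The result handling, reduced to a sketch (handle_result_sketch is an illustrative name, not kernel code):

/* Sketch: return count on success, the propagated -errno on failure. */
static ssize_t handle_result_sketch(struct ccw_device *cdev, int force,
                                    size_t count)
{
        int ret = online_store_handle_online(cdev, force);

        return ret ? ret : count;       /* error now reaches the writer */
}
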
@@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
584static DEVICE_ATTR(online, 0644, online_show, online_store); 588static DEVICE_ATTR(online, 0644, online_show, online_store);
585static DEVICE_ATTR(availability, 0444, available_show, NULL); 589static DEVICE_ATTR(availability, 0444, available_show, NULL);
586 590
587static struct attribute * subch_attrs[] = { 591static struct attribute *io_subchannel_attrs[] = {
588 &dev_attr_chpids.attr, 592 &dev_attr_chpids.attr,
589 &dev_attr_pimpampom.attr, 593 &dev_attr_pimpampom.attr,
590 NULL, 594 NULL,
591}; 595};
592 596
593static struct attribute_group subch_attr_group = { 597static struct attribute_group io_subchannel_attr_group = {
594 .attrs = subch_attrs, 598 .attrs = io_subchannel_attrs,
595};
596
597struct attribute_group *subch_attr_groups[] = {
598 &subch_attr_group,
599 NULL,
600}; 599};
601 600
602static struct attribute * ccwdev_attrs[] = { 601static struct attribute * ccwdev_attrs[] = {
@@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch,
790 sch_set_cdev(sch, cdev); 789 sch_set_cdev(sch, cdev);
791 cdev->private->schid = sch->schid; 790 cdev->private->schid = sch->schid;
792 cdev->ccwlock = sch->lock; 791 cdev->ccwlock = sch->lock;
793 device_trigger_reprobe(sch); 792 ccw_device_trigger_reprobe(cdev);
794 spin_unlock_irq(sch->lock); 793 spin_unlock_irq(sch->lock);
795} 794}
796 795
@@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1037 struct ccw_device_private *priv; 1036 struct ccw_device_private *priv;
1038 1037
1039 sch_set_cdev(sch, cdev); 1038 sch_set_cdev(sch, cdev);
1040 sch->driver = &io_subchannel_driver;
1041 cdev->ccwlock = sch->lock; 1039 cdev->ccwlock = sch->lock;
1042 1040
1043 /* Init private data. */ 1041 /* Init private data. */
@@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch)
1122 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1120 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1123} 1121}
1124 1122
1125static int 1123static void io_subchannel_init_fields(struct subchannel *sch)
1126io_subchannel_probe (struct subchannel *sch) 1124{
1125 if (cio_is_console(sch->schid))
1126 sch->opm = 0xff;
1127 else
1128 sch->opm = chp_get_sch_opm(sch);
1129 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1130 sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1131
1132 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1133 " - PIM = %02X, PAM = %02X, POM = %02X\n",
1134 sch->schib.pmcw.dev, sch->schid.ssid,
1135 sch->schid.sch_no, sch->schib.pmcw.pim,
1136 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1137 /* Initially set up some fields in the pmcw. */
1138 sch->schib.pmcw.ena = 0;
1139 sch->schib.pmcw.csense = 1; /* concurrent sense */
1140 if ((sch->lpm & (sch->lpm - 1)) != 0)
1141 sch->schib.pmcw.mp = 1; /* multipath mode */
1142 /* clean up possible residual cmf stuff */
1143 sch->schib.pmcw.mme = 0;
1144 sch->schib.pmcw.mbfc = 0;
1145 sch->schib.pmcw.mbi = 0;
1146 sch->schib.mba = 0;
1147}
1148
1149static int io_subchannel_probe(struct subchannel *sch)
1127{ 1150{
1128 struct ccw_device *cdev; 1151 struct ccw_device *cdev;
1129 int rc; 1152 int rc;
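
io_subchannel_init_fields() above enables multipath mode with the classic bit trick: lpm & (lpm - 1) clears the lowest set bit, so the expression is non-zero exactly when more than one path bit is set. The test in isolation:

/* Illustration of the multipath test: 0x80 has a single path bit set
 * and yields 0; 0x88 has two set and yields a non-zero value. */
static int more_than_one_path(u8 lpm)
{
        return (lpm & (lpm - 1)) != 0;
}
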
@@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch)
1132 1155
1133 cdev = sch_get_cdev(sch); 1156 cdev = sch_get_cdev(sch);
1134 if (cdev) { 1157 if (cdev) {
1158 rc = sysfs_create_group(&sch->dev.kobj,
1159 &io_subchannel_attr_group);
1160 if (rc)
1161 CIO_MSG_EVENT(0, "Failed to create io subchannel "
1162 "attributes for subchannel "
1163 "0.%x.%04x (rc=%d)\n",
1164 sch->schid.ssid, sch->schid.sch_no, rc);
1135 /* 1165 /*
1136 * This subchannel already has an associated ccw_device. 1166 * This subchannel already has an associated ccw_device.
1137 * Register it and exit. This happens for all early 1167 * Throw the delayed uevent for the subchannel, register
1138 * device, e.g. the console. 1168 * the ccw_device and exit. This happens for all early
1169 * devices, e.g. the console.
1139 */ 1170 */
1171 sch->dev.uevent_suppress = 0;
1172 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1140 cdev->dev.groups = ccwdev_attr_groups; 1173 cdev->dev.groups = ccwdev_attr_groups;
1141 device_initialize(&cdev->dev); 1174 device_initialize(&cdev->dev);
1142 ccw_device_register(cdev); 1175 ccw_device_register(cdev);
@@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch)
1152 get_device(&cdev->dev); 1185 get_device(&cdev->dev);
1153 return 0; 1186 return 0;
1154 } 1187 }
1188 io_subchannel_init_fields(sch);
1155 /* 1189 /*
1156 * First check if a fitting device may be found amongst the 1190 * First check if a fitting device may be found amongst the
1157 * disconnected devices or in the orphanage. 1191 * disconnected devices or in the orphanage.
1158 */ 1192 */
1159 dev_id.devno = sch->schib.pmcw.dev; 1193 dev_id.devno = sch->schib.pmcw.dev;
1160 dev_id.ssid = sch->schid.ssid; 1194 dev_id.ssid = sch->schid.ssid;
1195 rc = sysfs_create_group(&sch->dev.kobj,
1196 &io_subchannel_attr_group);
1197 if (rc)
1198 return rc;
1161 /* Allocate I/O subchannel private data. */ 1199 /* Allocate I/O subchannel private data. */
1162 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1200 sch->private = kzalloc(sizeof(struct io_subchannel_private),
1163 GFP_KERNEL | GFP_DMA); 1201 GFP_KERNEL | GFP_DMA);
1164 if (!sch->private) 1202 if (!sch->private) {
1165 return -ENOMEM; 1203 rc = -ENOMEM;
1204 goto out_err;
1205 }
1166 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); 1206 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1167 if (!cdev) 1207 if (!cdev)
1168 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), 1208 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
@@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch)
1181 } 1221 }
1182 cdev = io_subchannel_create_ccwdev(sch); 1222 cdev = io_subchannel_create_ccwdev(sch);
1183 if (IS_ERR(cdev)) { 1223 if (IS_ERR(cdev)) {
1184 kfree(sch->private); 1224 rc = PTR_ERR(cdev);
1185 return PTR_ERR(cdev); 1225 goto out_err;
1186 } 1226 }
1187 rc = io_subchannel_recog(cdev, sch); 1227 rc = io_subchannel_recog(cdev, sch);
1188 if (rc) { 1228 if (rc) {
@@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch)
1191 spin_unlock_irqrestore(sch->lock, flags); 1231 spin_unlock_irqrestore(sch->lock, flags);
1192 if (cdev->dev.release) 1232 if (cdev->dev.release)
1193 cdev->dev.release(&cdev->dev); 1233 cdev->dev.release(&cdev->dev);
1194 kfree(sch->private); 1234 goto out_err;
1195 } 1235 }
1196 1236 return 0;
1237out_err:
1238 kfree(sch->private);
1239 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1197 return rc; 1240 return rc;
1198} 1241}
1199 1242
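
io_subchannel_probe() now funnels every failure after the sysfs group creation through a single out_err label, so the attribute group and the subchannel private data are released exactly once on any exit path. The shape of that pattern, reduced to its essentials (probe_shape is an illustrative name, not kernel code):

/* Reduced sketch of the error funnel introduced above; kfree(NULL)
 * is a no-op, so the label is safe even when allocation failed. */
static int probe_shape(struct subchannel *sch)
{
        int rc;

        rc = sysfs_create_group(&sch->dev.kobj, &io_subchannel_attr_group);
        if (rc)
                return rc;
        sch->private = kzalloc(sizeof(struct io_subchannel_private),
                               GFP_KERNEL | GFP_DMA);
        if (!sch->private) {
                rc = -ENOMEM;
                goto out_err;
        }
        return 0;
out_err:
        kfree(sch->private);
        sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
        return rc;
}
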
@@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch)
1214 ccw_device_unregister(cdev); 1257 ccw_device_unregister(cdev);
1215 put_device(&cdev->dev); 1258 put_device(&cdev->dev);
1216 kfree(sch->private); 1259 kfree(sch->private);
1260 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1217 return 0; 1261 return 0;
1218} 1262}
1219 1263
@@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event)
1224 cdev = sch_get_cdev(sch); 1268 cdev = sch_get_cdev(sch);
1225 if (!cdev) 1269 if (!cdev)
1226 return 0; 1270 return 0;
1227 if (!cdev->drv) 1271 return ccw_device_notify(cdev, event);
1228 return 0;
1229 if (!cdev->online)
1230 return 0;
1231 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
1232} 1272}
1233 1273
1234static void io_subchannel_verify(struct subchannel *sch) 1274static void io_subchannel_verify(struct subchannel *sch)
@@ -1240,22 +1280,96 @@ static void io_subchannel_verify(struct subchannel *sch)
1240 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1280 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1241} 1281}
1242 1282
1243static void io_subchannel_ioterm(struct subchannel *sch) 1283static int check_for_io_on_path(struct subchannel *sch, int mask)
1244{ 1284{
1245 struct ccw_device *cdev; 1285 int cc;
1246 1286
1247 cdev = sch_get_cdev(sch); 1287 cc = stsch(sch->schid, &sch->schib);
1248 if (!cdev) 1288 if (cc)
1249 return; 1289 return 0;
1250 /* Internal I/O will be retried by the interrupt handler. */ 1290 if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
1251 if (cdev->private->flags.intretry) 1291 return 1;
1292 return 0;
1293}
1294
1295static void terminate_internal_io(struct subchannel *sch,
1296 struct ccw_device *cdev)
1297{
1298 if (cio_clear(sch)) {
1299 /* Recheck device in case clear failed. */
1300 sch->lpm = 0;
1301 if (cdev->online)
1302 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1303 else
1304 css_schedule_eval(sch->schid);
1252 return; 1305 return;
1306 }
1253 cdev->private->state = DEV_STATE_CLEAR_VERIFY; 1307 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
1308 /* Request retry of internal operation. */
1309 cdev->private->flags.intretry = 1;
1310 /* Call handler. */
1254 if (cdev->handler) 1311 if (cdev->handler)
1255 cdev->handler(cdev, cdev->private->intparm, 1312 cdev->handler(cdev, cdev->private->intparm,
1256 ERR_PTR(-EIO)); 1313 ERR_PTR(-EIO));
1257} 1314}
1258 1315
1316static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1317{
1318 struct ccw_device *cdev;
1319
1320 cdev = sch_get_cdev(sch);
1321 if (!cdev)
1322 return;
1323 if (check_for_io_on_path(sch, mask)) {
1324 if (cdev->private->state == DEV_STATE_ONLINE)
1325 ccw_device_kill_io(cdev);
1326 else {
1327 terminate_internal_io(sch, cdev);
1328 /* Re-start path verification. */
1329 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1330 }
1331 } else
 1332 /* Trigger path verification. */
1333 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1334
1335}
1336
1337static int io_subchannel_chp_event(struct subchannel *sch,
1338 struct chp_link *link, int event)
1339{
1340 int mask;
1341
1342 mask = chp_ssd_get_mask(&sch->ssd_info, link);
1343 if (!mask)
1344 return 0;
1345 switch (event) {
1346 case CHP_VARY_OFF:
1347 sch->opm &= ~mask;
1348 sch->lpm &= ~mask;
1349 io_subchannel_terminate_path(sch, mask);
1350 break;
1351 case CHP_VARY_ON:
1352 sch->opm |= mask;
1353 sch->lpm |= mask;
1354 io_subchannel_verify(sch);
1355 break;
1356 case CHP_OFFLINE:
1357 if (stsch(sch->schid, &sch->schib))
1358 return -ENXIO;
1359 if (!css_sch_is_valid(&sch->schib))
1360 return -ENODEV;
1361 io_subchannel_terminate_path(sch, mask);
1362 break;
1363 case CHP_ONLINE:
1364 if (stsch(sch->schid, &sch->schib))
1365 return -ENXIO;
1366 sch->lpm |= mask & sch->opm;
1367 io_subchannel_verify(sch);
1368 break;
1369 }
1370 return 0;
1371}
1372
1259static void 1373static void
1260io_subchannel_shutdown(struct subchannel *sch) 1374io_subchannel_shutdown(struct subchannel *sch)
1261{ 1375{
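
io_subchannel_chp_event() treats the four channel-path events symmetrically around one path mask: vary off and offline clear the mask bits and terminate I/O stuck on exactly that path, while vary on and online restore them and kick path verification. A worked illustration of the mask arithmetic (example values chosen for clarity, not taken from the patch):

/* Example: two installed paths 0x80 and 0x40, the event hits 0x40. */
static void vary_mask_example(void)
{
        u8 opm = 0xc0, lpm = 0xc0, mask = 0x40;

        opm &= ~mask;   /* CHP_VARY_OFF: opm becomes 0x80 */
        lpm &= ~mask;   /* lpm becomes 0x80; path 0x40 is unusable */

        opm |= mask;    /* CHP_VARY_ON: both masks back to 0xc0 */
        lpm |= mask;
}
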
@@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch)
1285 cio_disable_subchannel(sch); 1399 cio_disable_subchannel(sch);
1286} 1400}
1287 1401
1402static int io_subchannel_get_status(struct subchannel *sch)
1403{
1404 struct schib schib;
1405
1406 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
1407 return CIO_GONE;
1408 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
1409 return CIO_REVALIDATE;
1410 if (!sch->lpm)
1411 return CIO_NO_PATH;
1412 return CIO_OPER;
1413}
1414
1415static int device_is_disconnected(struct ccw_device *cdev)
1416{
1417 if (!cdev)
1418 return 0;
1419 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1420 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1421}
1422
1423static int recovery_check(struct device *dev, void *data)
1424{
1425 struct ccw_device *cdev = to_ccwdev(dev);
1426 int *redo = data;
1427
1428 spin_lock_irq(cdev->ccwlock);
1429 switch (cdev->private->state) {
1430 case DEV_STATE_DISCONNECTED:
1431 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1432 cdev->private->dev_id.ssid,
1433 cdev->private->dev_id.devno);
1434 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1435 *redo = 1;
1436 break;
1437 case DEV_STATE_DISCONNECTED_SENSE_ID:
1438 *redo = 1;
1439 break;
1440 }
1441 spin_unlock_irq(cdev->ccwlock);
1442
1443 return 0;
1444}
1445
1446static void recovery_work_func(struct work_struct *unused)
1447{
1448 int redo = 0;
1449
1450 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1451 if (redo) {
1452 spin_lock_irq(&recovery_lock);
1453 if (!timer_pending(&recovery_timer)) {
1454 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1455 recovery_phase++;
1456 mod_timer(&recovery_timer, jiffies +
1457 recovery_delay[recovery_phase] * HZ);
1458 }
1459 spin_unlock_irq(&recovery_lock);
1460 } else
1461 CIO_MSG_EVENT(4, "recovery: end\n");
1462}
1463
1464static DECLARE_WORK(recovery_work, recovery_work_func);
1465
1466static void recovery_func(unsigned long data)
1467{
1468 /*
1469 * We can't do our recovery in softirq context and it's not
1470 * performance critical, so we schedule it.
1471 */
1472 schedule_work(&recovery_work);
1473}
1474
1475static void ccw_device_schedule_recovery(void)
1476{
1477 unsigned long flags;
1478
1479 CIO_MSG_EVENT(4, "recovery: schedule\n");
1480 spin_lock_irqsave(&recovery_lock, flags);
1481 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1482 recovery_phase = 0;
1483 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1484 }
1485 spin_unlock_irqrestore(&recovery_lock, flags);
1486}
1487
1488static void device_set_disconnected(struct ccw_device *cdev)
1489{
1490 if (!cdev)
1491 return;
1492 ccw_device_set_timeout(cdev, 0);
1493 cdev->private->flags.fake_irb = 0;
1494 cdev->private->state = DEV_STATE_DISCONNECTED;
1495 if (cdev->online)
1496 ccw_device_schedule_recovery();
1497}
1498
1499static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1500{
1501 int event, ret, disc;
1502 unsigned long flags;
1503 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
1504 struct ccw_device *cdev;
1505
1506 spin_lock_irqsave(sch->lock, flags);
1507 cdev = sch_get_cdev(sch);
1508 disc = device_is_disconnected(cdev);
1509 if (disc && slow) {
 1510 /* Disconnected devices are only evaluated directly. */
1511 spin_unlock_irqrestore(sch->lock, flags);
1512 return 0;
1513 }
1514 /* No interrupt after machine check - kill pending timers. */
1515 if (cdev)
1516 ccw_device_set_timeout(cdev, 0);
1517 if (!disc && !slow) {
1518 /* Non-disconnected devices are evaluated on the slow path. */
1519 spin_unlock_irqrestore(sch->lock, flags);
1520 return -EAGAIN;
1521 }
1522 event = io_subchannel_get_status(sch);
1523 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
1524 sch->schid.ssid, sch->schid.sch_no, event,
1525 disc ? "disconnected" : "normal",
1526 slow ? "slow" : "fast");
1527 /* Analyze subchannel status. */
1528 action = NONE;
1529 switch (event) {
1530 case CIO_NO_PATH:
1531 if (disc) {
1532 /* Check if paths have become available. */
1533 action = REPROBE;
1534 break;
1535 }
1536 /* fall through */
1537 case CIO_GONE:
1538 /* Prevent unwanted effects when opening lock. */
1539 cio_disable_subchannel(sch);
1540 device_set_disconnected(cdev);
1541 /* Ask driver what to do with device. */
1542 action = UNREGISTER;
1543 spin_unlock_irqrestore(sch->lock, flags);
1544 ret = io_subchannel_notify(sch, event);
1545 spin_lock_irqsave(sch->lock, flags);
1546 if (ret)
1547 action = NONE;
1548 break;
1549 case CIO_REVALIDATE:
1550 /* Device will be removed, so no notify necessary. */
1551 if (disc)
1552 /* Reprobe because immediate unregister might block. */
1553 action = REPROBE;
1554 else
1555 action = UNREGISTER_PROBE;
1556 break;
1557 case CIO_OPER:
1558 if (disc)
1559 /* Get device operational again. */
1560 action = REPROBE;
1561 break;
1562 }
1563 /* Perform action. */
1564 ret = 0;
1565 switch (action) {
1566 case UNREGISTER:
1567 case UNREGISTER_PROBE:
1568 /* Unregister device (will use subchannel lock). */
1569 spin_unlock_irqrestore(sch->lock, flags);
1570 css_sch_device_unregister(sch);
1571 spin_lock_irqsave(sch->lock, flags);
1572
1573 /* Reset intparm to zeroes. */
1574 sch->schib.pmcw.intparm = 0;
1575 cio_modify(sch);
1576 break;
1577 case REPROBE:
1578 ccw_device_trigger_reprobe(cdev);
1579 break;
1580 default:
1581 break;
1582 }
1583 spin_unlock_irqrestore(sch->lock, flags);
1584 /* Probe if necessary. */
1585 if (action == UNREGISTER_PROBE)
1586 ret = css_probe_device(sch->schid);
1587
1588 return ret;
1589}
1590
1288#ifdef CONFIG_CCW_CONSOLE 1591#ifdef CONFIG_CCW_CONSOLE
1289static struct ccw_device console_cdev; 1592static struct ccw_device console_cdev;
1290static struct ccw_device_private console_private; 1593static struct ccw_device_private console_private;
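
The recovery machinery moved into this file works in phases: each pass over the ccw bus that still finds disconnected devices re-arms the timer with the next, longer entry from recovery_delay[], then stays at the longest delay. The table itself is defined outside this section; assuming the conventional short/medium/long steps, the re-arm logic behaves like this sketch:

/* Sketch of the phased back-off; the delay values are an assumption,
 * since recovery_delay[] is not visible in this section. */
static const int delay_sketch[] = { 3, 30, 300 };       /* seconds */

static int next_phase(int phase)
{
        int last = sizeof(delay_sketch) / sizeof(delay_sketch[0]) - 1;

        if (phase < last)
                phase++;        /* advance toward the longest delay */
        return phase;           /* then repeat: 3s, 30s, 300s, 300s, ... */
}
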
@@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void)
1297 return &ccw_console_lock; 1600 return &ccw_console_lock;
1298} 1601}
1299 1602
1300static int 1603static int ccw_device_console_enable(struct ccw_device *cdev,
1301ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) 1604 struct subchannel *sch)
1302{ 1605{
1303 int rc; 1606 int rc;
1304 1607
1305 /* Attach subchannel private data. */ 1608 /* Attach subchannel private data. */
1306 sch->private = cio_get_console_priv(); 1609 sch->private = cio_get_console_priv();
1307 memset(sch->private, 0, sizeof(struct io_subchannel_private)); 1610 memset(sch->private, 0, sizeof(struct io_subchannel_private));
1611 io_subchannel_init_fields(sch);
1612 sch->driver = &io_subchannel_driver;
1308 /* Initialize the ccw_device structure. */ 1613 /* Initialize the ccw_device structure. */
1309 cdev->dev.parent= &sch->dev; 1614 cdev->dev.parent= &sch->dev;
1310 rc = io_subchannel_recog(cdev, sch); 1615 rc = io_subchannel_recog(cdev, sch);
@@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
1515 return sch->schid; 1820 return sch->schid;
1516} 1821}
1517 1822
1518static int recovery_check(struct device *dev, void *data)
1519{
1520 struct ccw_device *cdev = to_ccwdev(dev);
1521 int *redo = data;
1522
1523 spin_lock_irq(cdev->ccwlock);
1524 switch (cdev->private->state) {
1525 case DEV_STATE_DISCONNECTED:
1526 CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
1527 cdev->private->dev_id.ssid,
1528 cdev->private->dev_id.devno);
1529 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1530 *redo = 1;
1531 break;
1532 case DEV_STATE_DISCONNECTED_SENSE_ID:
1533 *redo = 1;
1534 break;
1535 }
1536 spin_unlock_irq(cdev->ccwlock);
1537
1538 return 0;
1539}
1540
1541static void recovery_work_func(struct work_struct *unused)
1542{
1543 int redo = 0;
1544
1545 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1546 if (redo) {
1547 spin_lock_irq(&recovery_lock);
1548 if (!timer_pending(&recovery_timer)) {
1549 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1550 recovery_phase++;
1551 mod_timer(&recovery_timer, jiffies +
1552 recovery_delay[recovery_phase] * HZ);
1553 }
1554 spin_unlock_irq(&recovery_lock);
1555 } else
1556 CIO_MSG_EVENT(4, "recovery: end\n");
1557}
1558
1559static DECLARE_WORK(recovery_work, recovery_work_func);
1560
1561static void recovery_func(unsigned long data)
1562{
1563 /*
1564 * We can't do our recovery in softirq context and it's not
1565 * performance critical, so we schedule it.
1566 */
1567 schedule_work(&recovery_work);
1568}
1569
1570void ccw_device_schedule_recovery(void)
1571{
1572 unsigned long flags;
1573
1574 CIO_MSG_EVENT(4, "recovery: schedule\n");
1575 spin_lock_irqsave(&recovery_lock, flags);
1576 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1577 recovery_phase = 0;
1578 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1579 }
1580 spin_unlock_irqrestore(&recovery_lock, flags);
1581}
1582
1583MODULE_LICENSE("GPL"); 1823MODULE_LICENSE("GPL");
1584EXPORT_SYMBOL(ccw_device_set_online); 1824EXPORT_SYMBOL(ccw_device_set_online);
1585EXPORT_SYMBOL(ccw_device_set_offline); 1825EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index cb08092be39f..9800a8335a3f 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *);
88int ccw_device_online(struct ccw_device *); 88int ccw_device_online(struct ccw_device *);
89int ccw_device_offline(struct ccw_device *); 89int ccw_device_offline(struct ccw_device *);
90 90
91void ccw_device_schedule_recovery(void);
92
93/* Function prototypes for device status and basic sense stuff. */ 91/* Function prototypes for device status and basic sense stuff. */
94void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 92void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
95void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); 93void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
@@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *);
118 116
119int ccw_device_stlck(struct ccw_device *); 117int ccw_device_stlck(struct ccw_device *);
120 118
119/* Helper function for machine check handling. */
120void ccw_device_trigger_reprobe(struct ccw_device *);
121void ccw_device_kill_io(struct ccw_device *);
122int ccw_device_notify(struct ccw_device *, int);
123
121/* qdio needs this. */ 124/* qdio needs this. */
122void ccw_device_set_timeout(struct ccw_device *, int); 125void ccw_device_set_timeout(struct ccw_device *, int);
123extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 126extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e268d5a77c12..8b5fe57fb2f3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/device_fsm.c 2 * drivers/s390/cio/device_fsm.c
3 * finite state machine for device handling 3 * finite state machine for device handling
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */ 8 */
@@ -27,65 +26,6 @@
27 26
28static int timeout_log_enabled; 27static int timeout_log_enabled;
29 28
30int
31device_is_online(struct subchannel *sch)
32{
33 struct ccw_device *cdev;
34
35 cdev = sch_get_cdev(sch);
36 if (!cdev)
37 return 0;
38 return (cdev->private->state == DEV_STATE_ONLINE);
39}
40
41int
42device_is_disconnected(struct subchannel *sch)
43{
44 struct ccw_device *cdev;
45
46 cdev = sch_get_cdev(sch);
47 if (!cdev)
48 return 0;
49 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
50 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
51}
52
53void
54device_set_disconnected(struct subchannel *sch)
55{
56 struct ccw_device *cdev;
57
58 cdev = sch_get_cdev(sch);
59 if (!cdev)
60 return;
61 ccw_device_set_timeout(cdev, 0);
62 cdev->private->flags.fake_irb = 0;
63 cdev->private->state = DEV_STATE_DISCONNECTED;
64 if (cdev->online)
65 ccw_device_schedule_recovery();
66}
67
68void device_set_intretry(struct subchannel *sch)
69{
70 struct ccw_device *cdev;
71
72 cdev = sch_get_cdev(sch);
73 if (!cdev)
74 return;
75 cdev->private->flags.intretry = 1;
76}
77
78int device_trigger_verify(struct subchannel *sch)
79{
80 struct ccw_device *cdev;
81
82 cdev = sch_get_cdev(sch);
83 if (!cdev || !cdev->online)
84 return -EINVAL;
85 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
86 return 0;
87}
88
89static int __init ccw_timeout_log_setup(char *unused) 29static int __init ccw_timeout_log_setup(char *unused)
90{ 30{
91 timeout_log_enabled = 1; 31 timeout_log_enabled = 1;
@@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev)
99 struct schib schib; 39 struct schib schib;
100 struct subchannel *sch; 40 struct subchannel *sch;
101 struct io_subchannel_private *private; 41 struct io_subchannel_private *private;
42 union orb *orb;
102 int cc; 43 int cc;
103 44
104 sch = to_subchannel(cdev->dev.parent); 45 sch = to_subchannel(cdev->dev.parent);
105 private = to_io_private(sch); 46 private = to_io_private(sch);
47 orb = &private->orb;
106 cc = stsch(sch->schid, &schib); 48 cc = stsch(sch->schid, &schib);
107 49
108 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
109 "device information:\n", get_clock()); 51 "device information:\n", get_clock());
110 printk(KERN_WARNING "cio: orb:\n"); 52 printk(KERN_WARNING "cio: orb:\n");
111 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 53 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
112 &private->orb, sizeof(private->orb), 0); 54 orb, sizeof(*orb), 0);
113 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); 55 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
114 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); 56 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
115 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " 57 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
116 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); 58 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
117 59
118 if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || 60 if (orb->tm.b) {
119 (void *)(addr_t)private->orb.cpa == cdev->private->iccws) 61 printk(KERN_WARNING "cio: orb indicates transport mode\n");
120 printk(KERN_WARNING "cio: last channel program (intern):\n"); 62 printk(KERN_WARNING "cio: last tcw:\n");
121 else 63 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
122 printk(KERN_WARNING "cio: last channel program:\n"); 64 (void *)(addr_t)orb->tm.tcw,
123 65 sizeof(struct tcw), 0);
124 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 66 } else {
125 (void *)(addr_t)private->orb.cpa, 67 printk(KERN_WARNING "cio: orb indicates command mode\n");
126 sizeof(struct ccw1), 0); 68 if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
69 (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
70 printk(KERN_WARNING "cio: last channel program "
71 "(intern):\n");
72 else
73 printk(KERN_WARNING "cio: last channel program:\n");
74
75 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
76 (void *)(addr_t)orb->cmd.cpa,
77 sizeof(struct ccw1), 0);
78 }
127 printk(KERN_WARNING "cio: ccw device state: %d\n", 79 printk(KERN_WARNING "cio: ccw device state: %d\n",
128 cdev->private->state); 80 cdev->private->state);
129 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); 81 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
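
ccw_timeout_log() now discriminates on orb->tm.b, the bit that marks an ORB as transport mode, and dumps either the tcw or the channel program accordingly. The discrimination in isolation (program_address is an illustrative helper, not kernel code):

/* Sketch of the mode test used above: the b bit selects between the
 * transport-mode tcw dump and the command-mode ccw dump. */
static const void *program_address(union orb *orb)
{
        if (orb->tm.b)
                return (void *)(addr_t)orb->tm.tcw;     /* transport mode */
        return (void *)(addr_t)orb->cmd.cpa;            /* command mode */
}
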
@@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
171 add_timer(&cdev->private->timer); 123 add_timer(&cdev->private->timer);
172} 124}
173 125
174/* Kill any pending timers after machine check. */
175void
176device_kill_pending_timer(struct subchannel *sch)
177{
178 struct ccw_device *cdev;
179
180 cdev = sch_get_cdev(sch);
181 if (!cdev)
182 return;
183 ccw_device_set_timeout(cdev, 0);
184}
185
186/* 126/*
187 * Cancel running i/o. This is called repeatedly since halt/clear are 127 * Cancel running i/o. This is called repeatedly since halt/clear are
188 * asynchronous operations. We do one try with cio_cancel, two tries 128 * asynchronous operations. We do one try with cio_cancel, two tries
@@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
205 /* Not operational -> done. */ 145 /* Not operational -> done. */
206 return 0; 146 return 0;
207 /* Stage 1: cancel io. */ 147 /* Stage 1: cancel io. */
208 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && 148 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
209 !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { 149 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
210 ret = cio_cancel(sch); 150 if (!scsw_is_tm(&sch->schib.scsw)) {
211 if (ret != -EINVAL) 151 ret = cio_cancel(sch);
212 return ret; 152 if (ret != -EINVAL)
213 /* cancel io unsuccessful. From now on it is asynchronous. */ 153 return ret;
154 }
155 /* cancel io unsuccessful or not applicable (transport mode).
156 * Continue with asynchronous instructions. */
214 cdev->private->iretry = 3; /* 3 halt retries. */ 157 cdev->private->iretry = 3; /* 3 halt retries. */
215 } 158 }
216 if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { 159 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
217 /* Stage 2: halt io. */ 160 /* Stage 2: halt io. */
218 if (cdev->private->iretry) { 161 if (cdev->private->iretry) {
219 cdev->private->iretry--; 162 cdev->private->iretry--;
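
ccw_device_cancel_halt_clear() now skips the synchronous cancel for transport-mode I/O, since cancel applies only to command mode, and proceeds directly to the asynchronous halt (and ultimately clear) retries. Stage 1, reduced to a sketch:

/* Sketch of stage 1 as modified above: command mode first tries the
 * synchronous cancel; transport mode falls through to halt retries. */
static int stage1_sketch(struct subchannel *sch, struct ccw_device *cdev)
{
        if (!scsw_is_tm(&sch->schib.scsw)) {
                int ret = cio_cancel(sch);

                if (ret != -EINVAL)
                        return ret;     /* cancel succeeded or failed hard */
        }
        cdev->private->iretry = 3;      /* stage 2: up to three halts */
        return -EBUSY;                  /* caller re-enters for halt/clear */
}
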
@@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
388 } 331 }
389} 332}
390 333
334int ccw_device_notify(struct ccw_device *cdev, int event)
335{
336 if (!cdev->drv)
337 return 0;
338 if (!cdev->online)
339 return 0;
340 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
341}
342
391static void 343static void
392ccw_device_oper_notify(struct work_struct *work) 344ccw_device_oper_notify(struct work_struct *work)
393{ 345{
394 struct ccw_device_private *priv; 346 struct ccw_device_private *priv;
395 struct ccw_device *cdev; 347 struct ccw_device *cdev;
396 struct subchannel *sch;
397 int ret; 348 int ret;
398 unsigned long flags;
399 349
400 priv = container_of(work, struct ccw_device_private, kick_work); 350 priv = container_of(work, struct ccw_device_private, kick_work);
401 cdev = priv->cdev; 351 cdev = priv->cdev;
402 spin_lock_irqsave(cdev->ccwlock, flags); 352 ret = ccw_device_notify(cdev, CIO_OPER);
403 sch = to_subchannel(cdev->dev.parent);
404 if (sch->driver && sch->driver->notify) {
405 spin_unlock_irqrestore(cdev->ccwlock, flags);
406 ret = sch->driver->notify(sch, CIO_OPER);
407 spin_lock_irqsave(cdev->ccwlock, flags);
408 } else
409 ret = 0;
410 if (ret) { 353 if (ret) {
411 /* Reenable channel measurements, if needed. */ 354 /* Reenable channel measurements, if needed. */
412 spin_unlock_irqrestore(cdev->ccwlock, flags);
413 cmf_reenable(cdev); 355 cmf_reenable(cdev);
414 spin_lock_irqsave(cdev->ccwlock, flags);
415 wake_up(&cdev->private->wait_q); 356 wake_up(&cdev->private->wait_q);
416 } 357 } else
417 spin_unlock_irqrestore(cdev->ccwlock, flags);
418 if (!ret)
419 /* Driver doesn't want device back. */ 358 /* Driver doesn't want device back. */
420 ccw_device_do_unreg_rereg(work); 359 ccw_device_do_unreg_rereg(work);
421} 360}
@@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
621 /* Deliver fake irb to device driver, if needed. */ 560 /* Deliver fake irb to device driver, if needed. */
622 if (cdev->private->flags.fake_irb) { 561 if (cdev->private->flags.fake_irb) {
623 memset(&cdev->private->irb, 0, sizeof(struct irb)); 562 memset(&cdev->private->irb, 0, sizeof(struct irb));
624 cdev->private->irb.scsw.cc = 1; 563 cdev->private->irb.scsw.cmd.cc = 1;
625 cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; 564 cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
626 cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; 565 cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
627 cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; 566 cdev->private->irb.scsw.cmd.stctl =
567 SCSW_STCTL_STATUS_PEND;
628 cdev->private->flags.fake_irb = 0; 568 cdev->private->flags.fake_irb = 0;
629 if (cdev->handler) 569 if (cdev->handler)
630 cdev->handler(cdev, cdev->private->intparm, 570 cdev->handler(cdev, cdev->private->intparm,
@@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev)
718 sch = to_subchannel(cdev->dev.parent); 658 sch = to_subchannel(cdev->dev.parent);
719 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) 659 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
720 return -ENODEV; 660 return -ENODEV;
721 if (cdev->private->state != DEV_STATE_ONLINE) { 661 if (scsw_actl(&sch->schib.scsw) != 0)
722 if (sch->schib.scsw.actl != 0)
723 return -EBUSY;
724 return -EINVAL;
725 }
726 if (sch->schib.scsw.actl != 0)
727 return -EBUSY; 662 return -EBUSY;
663 if (cdev->private->state != DEV_STATE_ONLINE)
664 return -EINVAL;
728 /* Are we doing path grouping? */ 665 /* Are we doing path grouping? */
729 if (!cdev->private->options.pgroup) { 666 if (!cdev->private->options.pgroup) {
730 /* No, set state offline immediately. */ 667 /* No, set state offline immediately. */
@@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
799 */ 736 */
800 stsch(sch->schid, &sch->schib); 737 stsch(sch->schid, &sch->schib);
801 738
802 if (sch->schib.scsw.actl != 0 || 739 if (scsw_actl(&sch->schib.scsw) != 0 ||
803 (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || 740 (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
804 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { 741 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
805 /* 742 /*
806 * No final status yet or final status not yet delivered 743 * No final status yet or final status not yet delivered
 807 * to the device driver. Can't do path verification now, 744
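
The recurring change throughout this file is the switch from direct scsw field access to accessor functions: the scsw is now a union of command-mode and transport-mode layouts, and helpers such as scsw_actl(), scsw_stctl(), scsw_fctl() and scsw_cc() pick the correct variant. Typical use of the pattern (io_is_active is an illustrative wrapper, not kernel code):

/* The accessors shown in this diff hide the cmd/tm union layout. */
static int io_is_active(struct subchannel *sch)
{
        return scsw_actl(&sch->schib.scsw) != 0;
}
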
@@ -823,13 +760,13 @@ static void
823ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) 760ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
824{ 761{
825 struct irb *irb; 762 struct irb *irb;
763 int is_cmd;
826 764
827 irb = (struct irb *) __LC_IRB; 765 irb = (struct irb *) __LC_IRB;
766 is_cmd = !scsw_is_tm(&irb->scsw);
828 /* Check for unsolicited interrupt. */ 767 /* Check for unsolicited interrupt. */
829 if ((irb->scsw.stctl == 768 if (!scsw_is_solicited(&irb->scsw)) {
830 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) 769 if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
831 && (!irb->scsw.cc)) {
832 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
833 !irb->esw.esw0.erw.cons) { 770 !irb->esw.esw0.erw.cons) {
834 /* Unit check but no sense data. Need basic sense. */ 771 /* Unit check but no sense data. Need basic sense. */
835 if (ccw_device_do_sense(cdev, irb) != 0) 772 if (ccw_device_do_sense(cdev, irb) != 0)
@@ -848,7 +785,7 @@ call_handler_unsol:
848 } 785 }
849 /* Accumulate status and find out if a basic sense is needed. */ 786 /* Accumulate status and find out if a basic sense is needed. */
850 ccw_device_accumulate_irb(cdev, irb); 787 ccw_device_accumulate_irb(cdev, irb);
851 if (cdev->private->flags.dosense) { 788 if (is_cmd && cdev->private->flags.dosense) {
852 if (ccw_device_do_sense(cdev, irb) == 0) { 789 if (ccw_device_do_sense(cdev, irb) == 0) {
853 cdev->private->state = DEV_STATE_W4SENSE; 790 cdev->private->state = DEV_STATE_W4SENSE;
854 } 791 }
@@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
892 829
893 irb = (struct irb *) __LC_IRB; 830 irb = (struct irb *) __LC_IRB;
894 /* Check for unsolicited interrupt. */ 831 /* Check for unsolicited interrupt. */
895 if (irb->scsw.stctl == 832 if (scsw_stctl(&irb->scsw) ==
896 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 833 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
897 if (irb->scsw.cc == 1) 834 if (scsw_cc(&irb->scsw) == 1)
898 /* Basic sense hasn't started. Try again. */ 835 /* Basic sense hasn't started. Try again. */
899 ccw_device_do_sense(cdev, irb); 836 ccw_device_do_sense(cdev, irb);
900 else { 837 else {
@@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
912 * only deliver the halt/clear interrupt to the device driver as if it 849 * only deliver the halt/clear interrupt to the device driver as if it
913 * had killed the original request. 850 * had killed the original request.
914 */ 851 */
915 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 852 if (scsw_fctl(&irb->scsw) &
853 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
916 /* Retry Basic Sense if requested. */ 854 /* Retry Basic Sense if requested. */
917 if (cdev->private->flags.intretry) { 855 if (cdev->private->flags.intretry) {
918 cdev->private->flags.intretry = 0; 856 cdev->private->flags.intretry = 0;
@@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
986 ERR_PTR(-EIO)); 924 ERR_PTR(-EIO));
987} 925}
988 926
989void device_kill_io(struct subchannel *sch) 927void ccw_device_kill_io(struct ccw_device *cdev)
990{ 928{
991 int ret; 929 int ret;
992 struct ccw_device *cdev;
993 930
994 cdev = sch_get_cdev(sch);
995 ret = ccw_device_cancel_halt_clear(cdev); 931 ret = ccw_device_cancel_halt_clear(cdev);
996 if (ret == -EBUSY) { 932 if (ret == -EBUSY) {
997 ccw_device_set_timeout(cdev, 3*HZ); 933 ccw_device_set_timeout(cdev, 3*HZ);
@@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1021 case DEV_EVENT_INTERRUPT: 957 case DEV_EVENT_INTERRUPT:
1022 irb = (struct irb *) __LC_IRB; 958 irb = (struct irb *) __LC_IRB;
1023 /* Check for unsolicited interrupt. */ 959 /* Check for unsolicited interrupt. */
1024 if ((irb->scsw.stctl == 960 if ((scsw_stctl(&irb->scsw) ==
1025 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && 961 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1026 (!irb->scsw.cc)) 962 (!scsw_cc(&irb->scsw)))
1027 /* FIXME: we should restart stlck here, but this 963 /* FIXME: we should restart stlck here, but this
1028 * is extremely unlikely ... */ 964 * is extremely unlikely ... */
1029 goto out_wakeup; 965 goto out_wakeup;
@@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1055 ccw_device_sense_id_start(cdev); 991 ccw_device_sense_id_start(cdev);
1056} 992}
1057 993
1058void 994void ccw_device_trigger_reprobe(struct ccw_device *cdev)
1059device_trigger_reprobe(struct subchannel *sch)
1060{ 995{
1061 struct ccw_device *cdev; 996 struct subchannel *sch;
1062 997
1063 cdev = sch_get_cdev(sch);
1064 if (!cdev)
1065 return;
1066 if (cdev->private->state != DEV_STATE_DISCONNECTED) 998 if (cdev->private->state != DEV_STATE_DISCONNECTED)
1067 return; 999 return;
1068 1000
1001 sch = to_subchannel(cdev->dev.parent);
1069 /* Update some values. */ 1002 /* Update some values. */
1070 if (stsch(sch->schid, &sch->schib)) 1003 if (stsch(sch->schid, &sch->schib))
1071 return; 1004 return;
@@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch)
1081 sch->schib.pmcw.ena = 0; 1014 sch->schib.pmcw.ena = 0;
1082 if ((sch->lpm & (sch->lpm - 1)) != 0) 1015 if ((sch->lpm & (sch->lpm - 1)) != 0)
1083 sch->schib.pmcw.mp = 1; 1016 sch->schib.pmcw.mp = 1;
1084 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
 1085 /* We should also update ssd info, but this has to wait. */ 1017
1086 /* Check if this is another device which appeared on the same sch. */ 1018 /* Check if this is another device which appeared on the same sch. */
1087 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1019 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index cba7020517ed..1bdaa614e34f 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
196 irb = &cdev->private->irb; 196 irb = &cdev->private->irb;
197 197
198 /* Check the error cases. */ 198 /* Check the error cases. */
199 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 199 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
200 /* Retry Sense ID if requested. */ 200 /* Retry Sense ID if requested. */
201 if (cdev->private->flags.intretry) { 201 if (cdev->private->flags.intretry) {
202 cdev->private->flags.intretry = 0; 202 cdev->private->flags.intretry = 0;
@@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
234 irb->ecw[6], irb->ecw[7]); 234 irb->ecw[6], irb->ecw[7]);
235 return -EAGAIN; 235 return -EAGAIN;
236 } 236 }
237 if (irb->scsw.cc == 3) { 237 if (irb->scsw.cmd.cc == 3) {
238 u8 lpm; 238 u8 lpm;
239 239
240 lpm = to_io_private(sch)->orb.lpm; 240 lpm = to_io_private(sch)->orb.cmd.lpm;
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " 242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is " 243 "on subchannel 0.%x.%04x is "
@@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
248 } 248 }
249 249
250 /* Did we get a proper answer ? */ 250 /* Did we get a proper answer ? */
251 if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && 251 if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
252 cdev->private->senseid.reserved == 0xFF) { 252 cdev->private->senseid.reserved == 0xFF) {
253 if (irb->scsw.count < sizeof(struct senseid) - 8) 253 if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
254 cdev->private->flags.esid = 1; 254 cdev->private->flags.esid = 1;
255 return 0; /* Success */ 255 return 0; /* Success */
256 } 256 }
@@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
260 "subchannel 0.%x.%04x returns status %02X%02X\n", 260 "subchannel 0.%x.%04x returns status %02X%02X\n",
261 cdev->private->dev_id.devno, sch->schid.ssid, 261 cdev->private->dev_id.devno, sch->schid.ssid,
262 sch->schid.sch_no, 262 sch->schid.sch_no,
263 irb->scsw.dstat, irb->scsw.cstat); 263 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
264 return -EAGAIN; 264 return -EAGAIN;
265} 265}
266 266
@@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
277 sch = to_subchannel(cdev->dev.parent); 277 sch = to_subchannel(cdev->dev.parent);
278 irb = (struct irb *) __LC_IRB; 278 irb = (struct irb *) __LC_IRB;
279 /* Retry sense id, if needed. */ 279 /* Retry sense id, if needed. */
280 if (irb->scsw.stctl == 280 if (irb->scsw.cmd.stctl ==
281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
282 if ((irb->scsw.cc == 1) || !irb->scsw.actl) { 282 if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
283 ret = __ccw_device_sense_id_start(cdev); 283 ret = __ccw_device_sense_id_start(cdev);
284 if (ret && ret != -EBUSY) 284 if (ret && ret != -EBUSY)
285 ccw_device_sense_id_done(cdev, ret); 285 ccw_device_sense_id_done(cdev, ret);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f308ad55a6d5..ee1a28310fbb 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -17,6 +17,7 @@
17#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
18#include <asm/idals.h> 18#include <asm/idals.h>
19#include <asm/chpid.h> 19#include <asm/chpid.h>
20#include <asm/fcx.h>
20 21
21#include "cio.h" 22#include "cio.h"
22#include "cio_debug.h" 23#include "cio_debug.h"
@@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
179 return -EBUSY; 180 return -EBUSY;
180 } 181 }
181 if (cdev->private->state != DEV_STATE_ONLINE || 182 if (cdev->private->state != DEV_STATE_ONLINE ||
182 ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && 183 ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
183 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || 184 !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
184 cdev->private->flags.doverify) 185 cdev->private->flags.doverify)
185 return -EBUSY; 186 return -EBUSY;
186 ret = cio_set_options (sch, flags); 187 ret = cio_set_options (sch, flags);
@@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev)
379 if (cdev->private->state == DEV_STATE_NOT_OPER) 380 if (cdev->private->state == DEV_STATE_NOT_OPER)
380 return -ENODEV; 381 return -ENODEV;
381 if (cdev->private->state != DEV_STATE_ONLINE || 382 if (cdev->private->state != DEV_STATE_ONLINE ||
382 !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) 383 !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
383 return -EINVAL; 384 return -EINVAL;
384 return cio_resume(sch); 385 return cio_resume(sch);
385} 386}
@@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev)
404 * - fast notification was requested (primary status) 405 * - fast notification was requested (primary status)
405 * - unsolicited interrupts 406 * - unsolicited interrupts
406 */ 407 */
407 stctl = cdev->private->irb.scsw.stctl; 408 stctl = scsw_stctl(&cdev->private->irb.scsw);
408 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 409 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
409 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 410 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
410 (stctl == SCSW_STCTL_STATUS_PEND); 411 (stctl == SCSW_STCTL_STATUS_PEND);
@@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev)
528 cio_disable_subchannel(sch); //FIXME: return code? 529 cio_disable_subchannel(sch); //FIXME: return code?
529 goto out_unlock; 530 goto out_unlock;
530 } 531 }
531 cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; 532 cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
532 spin_unlock_irqrestore(sch->lock, flags); 533 spin_unlock_irqrestore(sch->lock, flags);
533 wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); 534 wait_event(cdev->private->wait_q,
535 cdev->private->irb.scsw.cmd.actl == 0);
534 spin_lock_irqsave(sch->lock, flags); 536 spin_lock_irqsave(sch->lock, flags);
535 cio_disable_subchannel(sch); //FIXME: return code? 537 cio_disable_subchannel(sch); //FIXME: return code?
536 if ((cdev->private->irb.scsw.dstat != 538 if ((cdev->private->irb.scsw.cmd.dstat !=
537 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || 539 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
538 (cdev->private->irb.scsw.cstat != 0)) 540 (cdev->private->irb.scsw.cmd.cstat != 0))
539 ret = -EIO; 541 ret = -EIO;
540 /* Clear irb. */ 542 /* Clear irb. */
541 memset(&cdev->private->irb, 0, sizeof(struct irb)); 543 memset(&cdev->private->irb, 0, sizeof(struct irb));
@@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
568} 570}
569EXPORT_SYMBOL(ccw_device_get_id); 571EXPORT_SYMBOL(ccw_device_get_id);
570 572
573/**
574 * ccw_device_tm_start_key - perform start function
575 * @cdev: ccw device on which to perform the start function
576 * @tcw: transport-command word to be started
577 * @intparm: user defined parameter to be passed to the interrupt handler
578 * @lpm: mask of paths to use
579 * @key: storage key to use for storage access
580 *
581 * Start the tcw on the given ccw device. Return zero on success, non-zero
582 * otherwise.
583 */
584int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
585 unsigned long intparm, u8 lpm, u8 key)
586{
587 struct subchannel *sch;
588 int rc;
589
590 sch = to_subchannel(cdev->dev.parent);
591 if (cdev->private->state != DEV_STATE_ONLINE)
592 return -EIO;
 593 /* Adjust requested path mask to exclude varied-off paths. */
594 if (lpm) {
595 lpm &= sch->opm;
596 if (lpm == 0)
597 return -EACCES;
598 }
599 rc = cio_tm_start_key(sch, tcw, lpm, key);
600 if (rc == 0)
601 cdev->private->intparm = intparm;
602 return rc;
603}
604EXPORT_SYMBOL(ccw_device_tm_start_key);
605
606/**
607 * ccw_device_tm_start_timeout_key - perform start function
608 * @cdev: ccw device on which to perform the start function
609 * @tcw: transport-command word to be started
610 * @intparm: user defined parameter to be passed to the interrupt handler
611 * @lpm: mask of paths to use
612 * @key: storage key to use for storage access
613 * @expires: time span in jiffies after which to abort request
614 *
615 * Start the tcw on the given ccw device. Return zero on success, non-zero
616 * otherwise.
617 */
618int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
619 unsigned long intparm, u8 lpm, u8 key,
620 int expires)
621{
622 int ret;
623
624 ccw_device_set_timeout(cdev, expires);
625 ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
626 if (ret != 0)
627 ccw_device_set_timeout(cdev, 0);
628 return ret;
629}
630EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
631
632/**
633 * ccw_device_tm_start - perform start function
634 * @cdev: ccw device on which to perform the start function
635 * @tcw: transport-command word to be started
636 * @intparm: user defined parameter to be passed to the interrupt handler
637 * @lpm: mask of paths to use
638 *
639 * Start the tcw on the given ccw device. Return zero on success, non-zero
640 * otherwise.
641 */
642int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
643 unsigned long intparm, u8 lpm)
644{
645 return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
646 PAGE_DEFAULT_KEY);
647}
648EXPORT_SYMBOL(ccw_device_tm_start);
649
650/**
651 * ccw_device_tm_start_timeout - perform start function
652 * @cdev: ccw device on which to perform the start function
653 * @tcw: transport-command word to be started
654 * @intparm: user defined parameter to be passed to the interrupt handler
655 * @lpm: mask of paths to use
656 * @expires: time span in jiffies after which to abort request
657 *
658 * Start the tcw on the given ccw device. Return zero on success, non-zero
659 * otherwise.
660 */
661int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
662 unsigned long intparm, u8 lpm, int expires)
663{
664 return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
665 PAGE_DEFAULT_KEY, expires);
666}
667EXPORT_SYMBOL(ccw_device_tm_start_timeout);
668
669/**
670 * ccw_device_tm_intrg - perform interrogate function
671 * @cdev: ccw device on which to perform the interrogate function
672 *
673 * Perform an interrogate function on the given ccw device. Return zero on
674 * success, non-zero otherwise.
675 */
676int ccw_device_tm_intrg(struct ccw_device *cdev)
677{
678 struct subchannel *sch = to_subchannel(cdev->dev.parent);
679
680 if (cdev->private->state != DEV_STATE_ONLINE)
681 return -EIO;
682 if (!scsw_is_tm(&sch->schib.scsw) ||
 683 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
684 return -EINVAL;
685 return cio_tm_intrg(sch);
686}
687EXPORT_SYMBOL(ccw_device_tm_intrg);
688
571// FIXME: these have to go: 689// FIXME: these have to go:
572 690
573int 691int
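
The new ccw_device_tm_* entry points give device drivers a transport-mode counterpart to the command-mode start functions: they take a tcw instead of a channel program, optionally with a path mask, storage key and timeout. A hypothetical caller (start_tm_io and the ten-second timeout are illustrative; only the exported ccw_device_tm_* functions come from this patch, and the meaning of lpm == 0 is inferred from the if (lpm) check in ccw_device_tm_start_key above):

/* Hypothetical driver-side use of the new transport-mode API. */
static int start_tm_io(struct ccw_device *cdev, struct tcw *tcw,
                       unsigned long intparm)
{
        /* lpm == 0 leaves path selection to the lower layer; the start
         * is aborted via the internal timer after ten seconds. */
        return ccw_device_tm_start_timeout(cdev, tcw, intparm, 0, 10 * HZ);
}
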
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 5cf7be008e98..86bc94eb607f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -28,13 +28,13 @@
28 * Helper function called from interrupt context to decide whether an 28 * Helper function called from interrupt context to decide whether an
29 * operation should be tried again. 29 * operation should be tried again.
30 */ 30 */
31static int __ccw_device_should_retry(struct scsw *scsw) 31static int __ccw_device_should_retry(union scsw *scsw)
32{ 32{
33 /* CC is only valid if start function bit is set. */ 33 /* CC is only valid if start function bit is set. */
34 if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) 34 if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
35 return 1; 35 return 1;
36 /* No more activity. For sense and set PGID we stubbornly try again. */ 36 /* No more activity. For sense and set PGID we stubbornly try again. */
37 if (!scsw->actl) 37 if (!scsw->cmd.actl)
38 return 1; 38 return 1;
39 return 0; 39 return 0;
40} 40}
@@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
125 125
126 sch = to_subchannel(cdev->dev.parent); 126 sch = to_subchannel(cdev->dev.parent);
127 irb = &cdev->private->irb; 127 irb = &cdev->private->irb;
128 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 128 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
129 /* Retry Sense PGID if requested. */ 129 /* Retry Sense PGID if requested. */
130 if (cdev->private->flags.intretry) { 130 if (cdev->private->flags.intretry) {
131 cdev->private->flags.intretry = 0; 131 cdev->private->flags.intretry = 0;
@@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
155 irb->ecw[6], irb->ecw[7]); 155 irb->ecw[6], irb->ecw[7]);
156 return -EAGAIN; 156 return -EAGAIN;
157 } 157 }
158 if (irb->scsw.cc == 3) { 158 if (irb->scsw.cmd.cc == 3) {
159 u8 lpm; 159 u8 lpm;
160 160
161 lpm = to_io_private(sch)->orb.lpm; 161 lpm = to_io_private(sch)->orb.cmd.lpm;
162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," 162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n", 163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid, 164 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
188 188
189 irb = (struct irb *) __LC_IRB; 189 irb = (struct irb *) __LC_IRB;
190 190
191 if (irb->scsw.stctl == 191 if (irb->scsw.cmd.stctl ==
192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
193 if (__ccw_device_should_retry(&irb->scsw)) { 193 if (__ccw_device_should_retry(&irb->scsw)) {
194 ret = __ccw_device_sense_pgid_start(cdev); 194 ret = __ccw_device_sense_pgid_start(cdev);
@@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
331 331
332 sch = to_subchannel(cdev->dev.parent); 332 sch = to_subchannel(cdev->dev.parent);
333 irb = &cdev->private->irb; 333 irb = &cdev->private->irb;
334 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 334 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
335 /* Retry Set PGID if requested. */ 335 /* Retry Set PGID if requested. */
336 if (cdev->private->flags.intretry) { 336 if (cdev->private->flags.intretry) {
337 cdev->private->flags.intretry = 0; 337 cdev->private->flags.intretry = 0;
@@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
355 irb->ecw[6], irb->ecw[7]); 355 irb->ecw[6], irb->ecw[7]);
356 return -EAGAIN; 356 return -EAGAIN;
357 } 357 }
358 if (irb->scsw.cc == 3) { 358 if (irb->scsw.cmd.cc == 3) {
359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," 359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
360 " lpm %02X, became 'not operational'\n", 360 " lpm %02X, became 'not operational'\n",
361 cdev->private->dev_id.devno, sch->schid.ssid, 361 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
376 376
377 sch = to_subchannel(cdev->dev.parent); 377 sch = to_subchannel(cdev->dev.parent);
378 irb = &cdev->private->irb; 378 irb = &cdev->private->irb;
379 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 379 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
380 /* Retry NOP if requested. */ 380 /* Retry NOP if requested. */
381 if (cdev->private->flags.intretry) { 381 if (cdev->private->flags.intretry) {
382 cdev->private->flags.intretry = 0; 382 cdev->private->flags.intretry = 0;
@@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
384 } 384 }
385 return -ETIME; 385 return -ETIME;
386 } 386 }
387 if (irb->scsw.cc == 3) { 387 if (irb->scsw.cmd.cc == 3) {
388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," 388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
389 " lpm %02X, became 'not operational'\n", 389 " lpm %02X, became 'not operational'\n",
390 cdev->private->dev_id.devno, sch->schid.ssid, 390 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
438 438
439 irb = (struct irb *) __LC_IRB; 439 irb = (struct irb *) __LC_IRB;
440 440
441 if (irb->scsw.stctl == 441 if (irb->scsw.cmd.stctl ==
442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
443 if (__ccw_device_should_retry(&irb->scsw)) 443 if (__ccw_device_should_retry(&irb->scsw))
444 __ccw_device_verify_start(cdev); 444 __ccw_device_verify_start(cdev);
@@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
544 544
545 irb = (struct irb *) __LC_IRB; 545 irb = (struct irb *) __LC_IRB;
546 546
547 if (irb->scsw.stctl == 547 if (irb->scsw.cmd.stctl ==
548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
549 if (__ccw_device_should_retry(&irb->scsw)) 549 if (__ccw_device_should_retry(&irb->scsw))
550 __ccw_device_disband_start(cdev); 550 __ccw_device_disband_start(cdev);
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 4a38993000f2..1b03c5423be2 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -29,9 +29,11 @@
29static void 29static void
30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
31{ 31{
32 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 32 char dbf_text[15];
33 SCHN_STAT_CHN_CTRL_CHK | 33
34 SCHN_STAT_INTF_CTRL_CHK))) 34 if (!scsw_is_valid_cstat(&irb->scsw) ||
35 !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
36 SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
35 return; 37 return;
36 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " 38 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
37 "received" 39 "received"
@@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
39 ": %02X sch_stat : %02X\n", 41 ": %02X sch_stat : %02X\n",
40 cdev->private->dev_id.devno, cdev->private->schid.ssid, 42 cdev->private->dev_id.devno, cdev->private->schid.ssid,
41 cdev->private->schid.sch_no, 43 cdev->private->schid.sch_no,
42 irb->scsw.dstat, irb->scsw.cstat); 44 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
43 45 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
44 if (irb->scsw.cc != 3) { 46 CIO_TRACE_EVENT(0, dbf_text);
45 char dbf_text[15]; 47 CIO_HEX_EVENT(0, irb, sizeof(struct irb));
46
47 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
48 CIO_TRACE_EVENT(0, dbf_text);
49 CIO_HEX_EVENT(0, irb, sizeof (struct irb));
50 }
51} 48}
52 49
53/* 50/*
@@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
81 * are conditions that have to be met for the extended control 78 * are conditions that have to be met for the extended control
82 * bit to have meaning. Sick. 79 * bit to have meaning. Sick.
83 */ 80 */
84 cdev->private->irb.scsw.ectl = 0; 81 cdev->private->irb.scsw.cmd.ectl = 0;
85 if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && 82 if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
86 !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) 83 !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
87 cdev->private->irb.scsw.ectl = irb->scsw.ectl; 84 cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
88 /* Check if extended control word is valid. */ 85 /* Check if extended control word is valid. */
89 if (!cdev->private->irb.scsw.ectl) 86 if (!cdev->private->irb.scsw.cmd.ectl)
90 return; 87 return;
91 /* Copy concurrent sense / model dependent information. */ 88 /* Copy concurrent sense / model dependent information. */
92 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); 89 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
@@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
98static int 95static int
99ccw_device_accumulate_esw_valid(struct irb *irb) 96ccw_device_accumulate_esw_valid(struct irb *irb)
100{ 97{
101 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) 98 if (!irb->scsw.cmd.eswf &&
99 (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
102 return 0; 100 return 0;
103 if (irb->scsw.stctl == 101 if (irb->scsw.cmd.stctl ==
104 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && 102 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
105 !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) 103 !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
106 return 0; 104 return 0;
107 return 1; 105 return 1;
108} 106}
@@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
125 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; 123 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
126 124
127 /* Copy subchannel logout information if esw is of format 0. */ 125 /* Copy subchannel logout information if esw is of format 0. */
128 if (irb->scsw.eswf) { 126 if (irb->scsw.cmd.eswf) {
129 cdev_sublog = &cdev_irb->esw.esw0.sublog; 127 cdev_sublog = &cdev_irb->esw.esw0.sublog;
130 sublog = &irb->esw.esw0.sublog; 128 sublog = &irb->esw.esw0.sublog;
131 /* Copy extended status flags. */ 129 /* Copy extended status flags. */
@@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
134 * Copy fields that have a meaning for channel data check 132 * Copy fields that have a meaning for channel data check
135 * channel control check and interface control check. 133 * channel control check and interface control check.
136 */ 134 */
137 if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 135 if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
138 SCHN_STAT_CHN_CTRL_CHK | 136 SCHN_STAT_CHN_CTRL_CHK |
139 SCHN_STAT_INTF_CTRL_CHK)) { 137 SCHN_STAT_INTF_CTRL_CHK)) {
140 /* Copy ancillary report bit. */ 138 /* Copy ancillary report bit. */
@@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
155 /* Copy i/o-error alert. */ 153 /* Copy i/o-error alert. */
156 cdev_sublog->ioerr = sublog->ioerr; 154 cdev_sublog->ioerr = sublog->ioerr;
157 /* Copy channel path timeout bit. */ 155 /* Copy channel path timeout bit. */
158 if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) 156 if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
159 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; 157 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
160 /* Copy failing storage address validity flag. */ 158 /* Copy failing storage address validity flag. */
161 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; 159 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
@@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
200 * If not, the remaining bits have no meaning and we must ignore them. 198 * If not, the remaining bits have no meaning and we must ignore them.
201 * The esw is not meaningful either... 199 * The esw is not meaningful either...
202 */ 200 */
203 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) 201 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
204 return; 202 return;
205 203
206 /* Check for channel checks and interface control checks. */ 204 /* Check for channel checks and interface control checks. */
207 ccw_device_msg_control_check(cdev, irb); 205 ccw_device_msg_control_check(cdev, irb);
208 206
209 /* Check for path not operational. */ 207 /* Check for path not operational. */
210 if (irb->scsw.pno && irb->scsw.fctl != 0 && 208 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
211 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
212 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
213 ccw_device_path_notoper(cdev); 209 ccw_device_path_notoper(cdev);
214 210 /* No irb accumulation for transport mode irbs. */
211 if (scsw_is_tm(&irb->scsw)) {
212 memcpy(&cdev->private->irb, irb, sizeof(struct irb));
213 return;
214 }
215 /* 215 /*
216 * Don't accumulate unsolicited interrupts. 216 * Don't accumulate unsolicited interrupts.
217 */ 217 */
218 if ((irb->scsw.stctl == 218 if (!scsw_is_solicited(&irb->scsw))
219 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
220 (!irb->scsw.cc))
221 return; 219 return;
222 220
223 cdev_irb = &cdev->private->irb; 221 cdev_irb = &cdev->private->irb;
@@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
227 * status at the subchannel has been cleared and we must not pass 225 * status at the subchannel has been cleared and we must not pass
228 * intermediate accumulated status to the device driver. 226 * intermediate accumulated status to the device driver.
229 */ 227 */
230 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) 228 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
231 memset(&cdev->private->irb, 0, sizeof(struct irb)); 229 memset(&cdev->private->irb, 0, sizeof(struct irb));
232 230
233 /* Copy bits which are valid only for the start function. */ 231 /* Copy bits which are valid only for the start function. */
234 if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { 232 if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
235 /* Copy key. */ 233 /* Copy key. */
236 cdev_irb->scsw.key = irb->scsw.key; 234 cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
237 /* Copy suspend control bit. */ 235 /* Copy suspend control bit. */
238 cdev_irb->scsw.sctl = irb->scsw.sctl; 236 cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
239 /* Accumulate deferred condition code. */ 237 /* Accumulate deferred condition code. */
240 cdev_irb->scsw.cc |= irb->scsw.cc; 238 cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
241 /* Copy ccw format bit. */ 239 /* Copy ccw format bit. */
242 cdev_irb->scsw.fmt = irb->scsw.fmt; 240 cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
243 /* Copy prefetch bit. */ 241 /* Copy prefetch bit. */
244 cdev_irb->scsw.pfch = irb->scsw.pfch; 242 cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
245 /* Copy initial-status-interruption-control. */ 243 /* Copy initial-status-interruption-control. */
246 cdev_irb->scsw.isic = irb->scsw.isic; 244 cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
247 /* Copy address limit checking control. */ 245 /* Copy address limit checking control. */
248 cdev_irb->scsw.alcc = irb->scsw.alcc; 246 cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
249 /* Copy suppress suspend bit. */ 247 /* Copy suppress suspend bit. */
250 cdev_irb->scsw.ssi = irb->scsw.ssi; 248 cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
251 } 249 }
252 250
253 /* Take care of the extended control bit and extended control word. */ 251 /* Take care of the extended control bit and extended control word. */
254 ccw_device_accumulate_ecw(cdev, irb); 252 ccw_device_accumulate_ecw(cdev, irb);
255 253
256 /* Accumulate function control. */ 254 /* Accumulate function control. */
257 cdev_irb->scsw.fctl |= irb->scsw.fctl; 255 cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
258 /* Copy activity control. */ 256 /* Copy activity control. */
259 cdev_irb->scsw.actl= irb->scsw.actl; 257 cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
260 /* Accumulate status control. */ 258 /* Accumulate status control. */
261 cdev_irb->scsw.stctl |= irb->scsw.stctl; 259 cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
262 /* 260 /*
263 * Copy ccw address if it is valid. This is a bit simplified 261 * Copy ccw address if it is valid. This is a bit simplified
264 * but should be close enough for all practical purposes. 262 * but should be close enough for all practical purposes.
265 */ 263 */
266 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || 264 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
267 ((irb->scsw.stctl == 265 ((irb->scsw.cmd.stctl ==
268 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && 266 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
269 (irb->scsw.actl & SCSW_ACTL_DEVACT) && 267 (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
270 (irb->scsw.actl & SCSW_ACTL_SCHACT)) || 268 (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
271 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) 269 (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
272 cdev_irb->scsw.cpa = irb->scsw.cpa; 270 cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
273 /* Accumulate device status, but not the device busy flag. */ 271 /* Accumulate device status, but not the device busy flag. */
274 cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; 272 cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
275 /* dstat is not always valid. */ 273 /* dstat is not always valid. */
276 if (irb->scsw.stctl & 274 if (irb->scsw.cmd.stctl &
277 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS 275 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
278 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) 276 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
279 cdev_irb->scsw.dstat |= irb->scsw.dstat; 277 cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
280 /* Accumulate subchannel status. */ 278 /* Accumulate subchannel status. */
281 cdev_irb->scsw.cstat |= irb->scsw.cstat; 279 cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
282 /* Copy residual count if it is valid. */ 280 /* Copy residual count if it is valid. */
283 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && 281 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
284 (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) 282 (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
285 cdev_irb->scsw.count = irb->scsw.count; 283 == 0)
284 cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
286 285
287 /* Take care of bits in the extended status word. */ 286 /* Take care of bits in the extended status word. */
288 ccw_device_accumulate_esw(cdev, irb); 287 ccw_device_accumulate_esw(cdev, irb);
@@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
299 * sense facility available/supported when enabling the 298 * sense facility available/supported when enabling the
300 * concurrent sense facility. 299 * concurrent sense facility.
301 */ 300 */
302 if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && 301 if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
303 !(cdev_irb->esw.esw0.erw.cons)) 302 !(cdev_irb->esw.esw0.erw.cons))
304 cdev->private->flags.dosense = 1; 303 cdev->private->flags.dosense = 1;
305} 304}
@@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
317 sch = to_subchannel(cdev->dev.parent); 316 sch = to_subchannel(cdev->dev.parent);
318 317
319 /* A sense is required, can we do it now ? */ 318 /* A sense is required, can we do it now ? */
320 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) 319 if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
321 /* 320 /*
322 * we received a Unit Check but we have no final 321 * we received a Unit Check but we have no final
323 * status yet, therefore we must delay the SENSE 322 * status yet, therefore we must delay the SENSE
@@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
355 * If not, the remaining bits have no meaning and we must ignore them. 354 * If not, the remaining bits have no meaning and we must ignore them.
356 * The esw is not meaningful either... 355 * The esw is not meaningful either...
357 */ 356 */
358 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) 357 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
359 return; 358 return;
360 359
361 /* Check for channel checks and interface control checks. */ 360 /* Check for channel checks and interface control checks. */
362 ccw_device_msg_control_check(cdev, irb); 361 ccw_device_msg_control_check(cdev, irb);
363 362
364 /* Check for path not operational. */ 363 /* Check for path not operational. */
365 if (irb->scsw.pno && irb->scsw.fctl != 0 && 364 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
366 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
367 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
368 ccw_device_path_notoper(cdev); 365 ccw_device_path_notoper(cdev);
369 366
370 if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && 367 if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
371 (irb->scsw.dstat & DEV_STAT_CHN_END)) { 368 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
372 cdev->private->irb.esw.esw0.erw.cons = 1; 369 cdev->private->irb.esw.esw0.erw.cons = 1;
373 cdev->private->flags.dosense = 0; 370 cdev->private->flags.dosense = 0;
374 } 371 }
@@ -386,11 +383,11 @@ int
386ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) 383ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
387{ 384{
388 ccw_device_accumulate_irb(cdev, irb); 385 ccw_device_accumulate_irb(cdev, irb);
389 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) 386 if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
390 return -EBUSY; 387 return -EBUSY;
391 /* Check for basic sense. */ 388 /* Check for basic sense. */
392 if (cdev->private->flags.dosense && 389 if (cdev->private->flags.dosense &&
393 !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { 390 !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
394 cdev->private->irb.esw.esw0.erw.cons = 1; 391 cdev->private->irb.esw.esw0.erw.cons = 1;
395 cdev->private->flags.dosense = 0; 392 cdev->private->flags.dosense = 0;
396 return 0; 393 return 0;
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
new file mode 100644
index 000000000000..61677dfbdc9b
--- /dev/null
+++ b/drivers/s390/cio/fcx.c
@@ -0,0 +1,350 @@
1/*
2 * Functions for assembling fcx enabled I/O control blocks.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <asm/fcx.h>
15#include "cio.h"
16
17/**
18 * tcw_get_intrg - return pointer to associated interrogate tcw
19 * @tcw: pointer to the original tcw
20 *
21 * Return a pointer to the interrogate tcw associated with the specified tcw
22 * or %NULL if there is no associated interrogate tcw.
23 */
24struct tcw *tcw_get_intrg(struct tcw *tcw)
25{
26 return (struct tcw *) ((addr_t) tcw->intrg);
27}
28EXPORT_SYMBOL(tcw_get_intrg);
29
30/**
31 * tcw_get_data - return pointer to input/output data associated with tcw
32 * @tcw: pointer to the tcw
33 *
34 * Return the input or output data address specified in the tcw depending
35 * on whether the r-bit or the w-bit is set. If neither bit is set, return
36 * %NULL.
37 */
38void *tcw_get_data(struct tcw *tcw)
39{
40 if (tcw->r)
41 return (void *) ((addr_t) tcw->input);
42 if (tcw->w)
43 return (void *) ((addr_t) tcw->output);
44 return NULL;
45}
46EXPORT_SYMBOL(tcw_get_data);
47
48/**
49 * tcw_get_tccb - return pointer to tccb associated with tcw
50 * @tcw: pointer to the tcw
51 *
52 * Return pointer to the tccb associated with this tcw.
53 */
54struct tccb *tcw_get_tccb(struct tcw *tcw)
55{
56 return (struct tccb *) ((addr_t) tcw->tccb);
57}
58EXPORT_SYMBOL(tcw_get_tccb);
59
60/**
61 * tcw_get_tsb - return pointer to tsb associated with tcw
62 * @tcw: pointer to the tcw
63 *
64 * Return pointer to the tsb associated with this tcw.
65 */
66struct tsb *tcw_get_tsb(struct tcw *tcw)
67{
68 return (struct tsb *) ((addr_t) tcw->tsb);
69}
70EXPORT_SYMBOL(tcw_get_tsb);
71
72/**
73 * tcw_init - initialize tcw data structure
74 * @tcw: pointer to the tcw to be initialized
75 * @r: initial value of the r-bit
76 * @w: initial value of the w-bit
77 *
78 * Initialize all fields of the specified tcw data structure with zero and
79 * fill in the format, flags, r and w fields.
80 */
81void tcw_init(struct tcw *tcw, int r, int w)
82{
83 memset(tcw, 0, sizeof(struct tcw));
84 tcw->format = TCW_FORMAT_DEFAULT;
85 tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
86 if (r)
87 tcw->r = 1;
88 if (w)
89 tcw->w = 1;
90}
91EXPORT_SYMBOL(tcw_init);
92
93static inline size_t tca_size(struct tccb *tccb)
94{
95 return tccb->tcah.tcal - 12;
96}
97
98static u32 calc_dcw_count(struct tccb *tccb)
99{
100 int offset;
101 struct dcw *dcw;
102 u32 count = 0;
103 size_t size;
104
105 size = tca_size(tccb);
106 for (offset = 0; offset < size;) {
107 dcw = (struct dcw *) &tccb->tca[offset];
108 count += dcw->count;
109 if (!(dcw->flags & DCW_FLAGS_CC))
110 break;
111 offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
112 }
113 return count;
114}
115
116static u32 calc_cbc_size(struct tidaw *tidaw, int num)
117{
118 int i;
119 u32 cbc_data;
120 u32 cbc_count = 0;
121 u64 data_count = 0;
122
123 for (i = 0; i < num; i++) {
124 if (tidaw[i].flags & TIDAW_FLAGS_LAST)
125 break;
126 /* TODO: find out if padding applies to total of data
127 * transferred or data transferred by this tidaw. Assumption:
128 * applies to total. */
129 data_count += tidaw[i].count;
130 if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
131 cbc_data = 4 + ALIGN(data_count, 4) - data_count;
132 cbc_count += cbc_data;
133 data_count += cbc_data;
134 }
135 }
136 return cbc_count;
137}
138
139/**
140 * tcw_finalize - finalize tcw length fields and tidaw list
141 * @tcw: pointer to the tcw
142 * @num_tidaws: the number of tidaws used to address input/output data or zero
143 * if no tida is used
144 *
145 * Calculate the input-/output-count and tccbl field in the tcw, add a
146 * tcat to the tccb and terminate the data tidaw list if used.
147 *
148 * Note: in case input- or output-tida is used, the tidaw-list must be stored
149 * in contiguous storage (no ttic). The tcal field in the tccb must be
150 * up-to-date.
151 */
152void tcw_finalize(struct tcw *tcw, int num_tidaws)
153{
154 struct tidaw *tidaw;
155 struct tccb *tccb;
156 struct tccb_tcat *tcat;
157 u32 count;
158
159 /* Terminate tidaw list. */
160 tidaw = tcw_get_data(tcw);
161 if (num_tidaws > 0)
162 tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
163 /* Add tcat to tccb. */
164 tccb = tcw_get_tccb(tcw);
165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
166 memset(tcat, 0, sizeof(*tcat));
167 /* Calculate tcw input/output count and tcat transport count. */
168 count = calc_dcw_count(tccb);
169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
170 count += calc_cbc_size(tidaw, num_tidaws);
171 if (tcw->r)
172 tcw->input_count = count;
173 else if (tcw->w)
174 tcw->output_count = count;
175 tcat->count = ALIGN(count, 4) + 4;
176 /* Calculate tccbl. */
177 tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
178 sizeof(struct tccb_tcat) - 20) >> 2;
179}
180EXPORT_SYMBOL(tcw_finalize);
181
182/**
183 * tcw_set_intrg - set the interrogate tcw address of a tcw
184 * @tcw: the tcw address
185 * @intrg_tcw: the address of the interrogate tcw
186 *
187 * Set the address of the interrogate tcw in the specified tcw.
188 */
189void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
190{
191 tcw->intrg = (u32) ((addr_t) intrg_tcw);
192}
193EXPORT_SYMBOL(tcw_set_intrg);
194
195/**
196 * tcw_set_data - set data address and tida flag of a tcw
197 * @tcw: the tcw address
198 * @data: the data address
199 * @use_tidal: zero if the data address specifies a contiguous block of data,
200 * non-zero if it specifies a list of tidaws.
201 *
202 * Set the input/output data address of a tcw (depending on the value of the
203 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
204 * is set as well.
205 */
206void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
207{
208 if (tcw->r) {
209 tcw->input = (u64) ((addr_t) data);
210 if (use_tidal)
211 tcw->flags |= TCW_FLAGS_INPUT_TIDA;
212 } else if (tcw->w) {
213 tcw->output = (u64) ((addr_t) data);
214 if (use_tidal)
215 tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
216 }
217}
218EXPORT_SYMBOL(tcw_set_data);
219
220/**
221 * tcw_set_tccb - set tccb address of a tcw
222 * @tcw: the tcw address
223 * @tccb: the tccb address
224 *
225 * Set the address of the tccb in the specified tcw.
226 */
227void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
228{
229 tcw->tccb = (u64) ((addr_t) tccb);
230}
231EXPORT_SYMBOL(tcw_set_tccb);
232
233/**
234 * tcw_set_tsb - set tsb address of a tcw
235 * @tcw: the tcw address
236 * @tsb: the tsb address
237 *
238 * Set the address of the tsb in the specified tcw.
239 */
240void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
241{
242 tcw->tsb = (u64) ((addr_t) tsb);
243}
244EXPORT_SYMBOL(tcw_set_tsb);
245
246/**
247 * tccb_init - initialize tccb
248 * @tccb: the tccb address
249 * @size: the maximum size of the tccb
250 * @sac: the service-action-code to be used
251 *
252 * Initialize the header of the specified tccb by resetting all values to zero
253 * and filling in defaults for format, sac and initial tcal fields.
254 */
255void tccb_init(struct tccb *tccb, size_t size, u32 sac)
256{
257 memset(tccb, 0, size);
258 tccb->tcah.format = TCCB_FORMAT_DEFAULT;
259 tccb->tcah.sac = sac;
260 tccb->tcah.tcal = 12;
261}
262EXPORT_SYMBOL(tccb_init);
263
264/**
265 * tsb_init - initialize tsb
266 * @tsb: the tsb address
267 *
268 * Initialize the specified tsb by resetting all values to zero.
269 */
270void tsb_init(struct tsb *tsb)
271{
272 memset(tsb, 0, sizeof(*tsb));
273}
274EXPORT_SYMBOL(tsb_init);
275
276/**
277 * tccb_add_dcw - add a dcw to the tccb
278 * @tccb: the tccb address
279 * @tccb_size: the maximum tccb size
280 * @cmd: the dcw command
281 * @flags: flags for the dcw
282 * @cd: pointer to control data for this dcw or NULL if none is required
283 * @cd_count: number of control data bytes for this dcw
284 * @count: number of data bytes for this dcw
285 *
286 * Add a new dcw to the specified tccb by writing the dcw information specified
287 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
288 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
289 * would exceed the available space as defined by @tccb_size.
290 *
291 * Note: the tcal field of the tccb header will be updated to reflect added
292 * content.
293 */
294struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
295 void *cd, u8 cd_count, u32 count)
296{
297 struct dcw *dcw;
298 int size;
299 int tca_offset;
300
301 /* Check for space. */
302 tca_offset = tca_size(tccb);
303 size = ALIGN(sizeof(struct dcw) + cd_count, 4);
304 if (sizeof(struct tccb_tcah) + tca_offset + size +
305 sizeof(struct tccb_tcat) > tccb_size)
306 return ERR_PTR(-ENOSPC);
307 /* Add dcw to tca. */
308 dcw = (struct dcw *) &tccb->tca[tca_offset];
309 memset(dcw, 0, size);
310 dcw->cmd = cmd;
311 dcw->flags = flags;
312 dcw->count = count;
313 dcw->cd_count = cd_count;
314 if (cd)
315 memcpy(&dcw->cd[0], cd, cd_count);
316 tccb->tcah.tcal += size;
317 return dcw;
318}
319EXPORT_SYMBOL(tccb_add_dcw);
320
321/**
322 * tcw_add_tidaw - add a tidaw to a tcw
323 * @tcw: the tcw address
324 * @num_tidaws: the current number of tidaws
325 * @flags: flags for the new tidaw
326 * @addr: address value for the new tidaw
327 * @count: count value for the new tidaw
328 *
329 * Add a new tidaw to the input/output data tidaw-list of the specified tcw
330 * (depending on the value of the r-flag and w-flag) and return a pointer to
331 * the new tidaw.
332 *
333 * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
334 * must ensure that there is enough space for the new tidaw. The last-tidaw
335 * flag for the last tidaw in the list will be set by tcw_finalize.
336 */
337struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
338 void *addr, u32 count)
339{
340 struct tidaw *tidaw;
341
342 /* Add tidaw to tidaw-list. */
343 tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
344 memset(tidaw, 0, sizeof(struct tidaw));
345 tidaw->flags = flags;
346 tidaw->count = count;
347 tidaw->addr = (u64) ((addr_t) addr);
348 return tidaw;
349}
350EXPORT_SYMBOL(tcw_add_tidaw);
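Taken together, the helpers in this file are intended to be called in sequence. Below is a hedged assembly sketch mirroring the counts from the itcw example further down (72 = 20 + 52 bytes); tcw, tccb, tsb and the tidaw array are caller-provided, suitably aligned storage below 2G, and the dcw command 0x02 is illustrative:

	tcw_init(tcw, 1 /* read */, 0);
	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
	tsb_init(tsb);
	tcw_set_tccb(tcw, tccb);
	tcw_set_tsb(tcw, tsb);
	tcw_set_data(tcw, tidaws, 1 /* use tidal */);
	tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x02, 0, NULL, 0, 72);
	tcw_add_tidaw(tcw, 0, 0, buf1, 20);
	tcw_add_tidaw(tcw, 1, 0, buf2, 52);
	tcw_finalize(tcw, 2);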
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 144466ab8c15..528065cb5021 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -8,7 +8,7 @@
8#ifndef S390_IDSET_H 8#ifndef S390_IDSET_H
9#define S390_IDSET_H S390_IDSET_H 9#define S390_IDSET_H S390_IDSET_H
10 10
11#include "schid.h" 11#include <asm/schid.h>
12 12
13struct idset; 13struct idset;
14 14
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 8c613160bfce..3f8f1cf69c76 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,12 +1,12 @@
1#ifndef S390_IO_SCH_H 1#ifndef S390_IO_SCH_H
2#define S390_IO_SCH_H 2#define S390_IO_SCH_H
3 3
4#include "schid.h" 4#include <asm/schid.h>
5 5
6/* 6/*
7 * operation request block 7 * command-mode operation request block
8 */ 8 */
9struct orb { 9struct cmd_orb {
10 u32 intparm; /* interruption parameter */ 10 u32 intparm; /* interruption parameter */
11 u32 key : 4; /* flags, like key, suspend control, etc. */ 11 u32 key : 4; /* flags, like key, suspend control, etc. */
12 u32 spnd : 1; /* suspend control */ 12 u32 spnd : 1; /* suspend control */
@@ -28,8 +28,36 @@ struct orb {
28 u32 cpa; /* channel program address */ 28 u32 cpa; /* channel program address */
29} __attribute__ ((packed, aligned(4))); 29} __attribute__ ((packed, aligned(4)));
30 30
31/*
32 * transport-mode operation request block
33 */
34struct tm_orb {
35 u32 intparm;
36 u32 key:4;
37 u32 :9;
38 u32 b:1;
39 u32 :2;
40 u32 lpm:8;
41 u32 :7;
42 u32 x:1;
43 u32 tcw;
44 u32 prio:8;
45 u32 :8;
46 u32 rsvpgm:8;
47 u32 :8;
48 u32 :32;
49 u32 :32;
50 u32 :32;
51 u32 :32;
52} __attribute__ ((packed, aligned(4)));
53
54union orb {
55 struct cmd_orb cmd;
56 struct tm_orb tm;
57} __attribute__ ((packed, aligned(4)));
58
31struct io_subchannel_private { 59struct io_subchannel_private {
32 struct orb orb; /* operation request block */ 60 union orb orb; /* operation request block */
33 struct ccw1 sense_ccw; /* static ccw for sense command */ 61 struct ccw1 sense_ccw; /* static ccw for sense command */
34} __attribute__ ((aligned(8))); 62} __attribute__ ((aligned(8)));
35 63
@@ -95,16 +123,18 @@ struct ccw_device_private {
95 void *cmb_wait; /* deferred cmb enable/disable */ 123 void *cmb_wait; /* deferred cmb enable/disable */
96}; 124};
97 125
98static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) 126static inline int ssch(struct subchannel_id schid, volatile union orb *addr)
99{ 127{
100 register struct subchannel_id reg1 asm("1") = schid; 128 register struct subchannel_id reg1 asm("1") = schid;
101 int ccode; 129 int ccode = -EIO;
102 130
103 asm volatile( 131 asm volatile(
104 " ssch 0(%2)\n" 132 " ssch 0(%2)\n"
105 " ipm %0\n" 133 "0: ipm %0\n"
106 " srl %0,28" 134 " srl %0,28\n"
107 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 135 "1:\n"
136 EX_TABLE(0b, 1b)
137 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
108 return ccode; 138 return ccode;
109} 139}
110 140
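The added EX_TABLE entry means ssch() now returns the preset -EIO instead of oopsing when the instruction faults on a vanished subchannel. A hedged sketch of how a caller might fold that into the usual condition-code handling (hypothetical caller, modelled on cio-style cc checks):

	int ccode = ssch(sch->schid, &to_io_private(sch)->orb);
	switch (ccode) {
	case 0:			/* start function initiated */
		break;
	case 1:			/* status pending */
	case 2:			/* busy */
		return -EBUSY;
	default:		/* cc 3 or faulted instruction (-EIO) */
		return -ENODEV;
	}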
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 652ea3625f9d..9fa2ac13ac85 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -2,7 +2,7 @@
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include <asm/chpid.h> 4#include <asm/chpid.h>
5#include "schid.h" 5#include <asm/schid.h>
6 6
7/* 7/*
8 * TPI info structure 8 * TPI info structure
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c
new file mode 100644
index 000000000000..c592087be0f1
--- /dev/null
+++ b/drivers/s390/cio/isc.c
@@ -0,0 +1,68 @@
1/*
2 * Functions for registration of I/O interruption subclasses on s390.
3 *
4 * Copyright IBM Corp. 2008
5 * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/spinlock.h>
9#include <linux/module.h>
10#include <asm/isc.h>
11
12static unsigned int isc_refs[MAX_ISC + 1];
13static DEFINE_SPINLOCK(isc_ref_lock);
14
15
16/**
17 * isc_register - register an I/O interruption subclass.
18 * @isc: I/O interruption subclass to register
19 *
20 * The number of users for @isc is increased. If this is the first user to
21 * register @isc, the corresponding I/O interruption subclass mask is enabled.
22 *
23 * Context:
24 * This function must not be called in interrupt context.
25 */
26void isc_register(unsigned int isc)
27{
28 if (isc > MAX_ISC) {
29 WARN_ON(1);
30 return;
31 }
32
33 spin_lock(&isc_ref_lock);
34 if (isc_refs[isc] == 0)
35 ctl_set_bit(6, 31 - isc);
36 isc_refs[isc]++;
37 spin_unlock(&isc_ref_lock);
38}
39EXPORT_SYMBOL_GPL(isc_register);
40
41/**
42 * isc_unregister - unregister an I/O interruption subclass.
43 * @isc: I/O interruption subclass to unregister
44 *
45 * The number of users for @isc is decreased. If this is the last user to
46 * unregister @isc, the corresponding I/O interruption subclass mask is
47 * disabled.
48 * Note: This function must not be called if isc_register() hasn't been called
49 * before by the driver for @isc.
50 *
51 * Context:
52 * This function must not be called in interrupt context.
53 */
54void isc_unregister(unsigned int isc)
55{
56 spin_lock(&isc_ref_lock);
57 /* check for misuse */
58 if (isc > MAX_ISC || isc_refs[isc] == 0) {
59 WARN_ON(1);
60 goto out_unlock;
61 }
62 if (isc_refs[isc] == 1)
63 ctl_clear_bit(6, 31 - isc);
64 isc_refs[isc]--;
65out_unlock:
66 spin_unlock(&isc_ref_lock);
67}
68EXPORT_SYMBOL_GPL(isc_unregister);
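A hedged pairing sketch; QDIO is converted to exactly this pattern later in the patch (see the init_QDIO/cleanup_QDIO hunks), while the driver skeleton here is hypothetical:

	static int __init my_driver_init(void)	/* hypothetical driver */
	{
		isc_register(QDIO_AIRQ_ISC);	/* enables the subclass mask */
		return 0;
	}

	static void __exit my_driver_exit(void)
	{
		isc_unregister(QDIO_AIRQ_ISC);	/* mask cleared on last user */
	}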
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
new file mode 100644
index 000000000000..17da9ab932ed
--- /dev/null
+++ b/drivers/s390/cio/itcw.c
@@ -0,0 +1,327 @@
1/*
2 * Functions for incremental construction of fcx enabled I/O control blocks.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <asm/fcx.h>
15#include <asm/itcw.h>
16
17/**
18 * struct itcw - incremental tcw helper data type
19 *
20 * This structure serves as a handle for the incremental construction of a
21 * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
22 * tcw and associated data. The data structures are contained inside a single
23 * contiguous buffer provided by the user.
24 *
25 * The itcw construction functions take care of overall data integrity:
26 * - reset unused fields to zero
27 * - fill in required pointers
28 * - ensure required alignment for data structures
29 * - prevent data structures from crossing a 4k-byte boundary where required
30 * - calculate tccb-related length fields
31 * - optionally provide ready-made interrogate tcw and associated structures
32 *
33 * Restrictions apply to the itcws created with these construction functions:
34 * - tida only supported for data address, not for tccb
35 * - only contiguous tidaw-lists (no ttic)
36 * - total number of bytes required per itcw may not exceed 4k bytes
37 * - either read or write operation (may not work with r=0 and w=0)
38 *
39 * Example:
40 * struct itcw *itcw;
41 * void *buffer;
42 * size_t size;
43 *
44 * size = itcw_calc_size(1, 2, 0);
45 * buffer = kmalloc(size, GFP_DMA);
46 * if (!buffer)
47 * return -ENOMEM;
48 * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
49 * if (IS_ERR(itcw))
50 * return PTR_ERR(itcw);
51 * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
52 * itcw_add_tidaw(itcw, 0, 0x30000, 20);
53 * itcw_add_tidaw(itcw, 0, 0x40000, 52);
54 * itcw_finalize(itcw);
55 *
56 */
57struct itcw {
58 struct tcw *tcw;
59 struct tcw *intrg_tcw;
60 int num_tidaws;
61 int max_tidaws;
62 int intrg_num_tidaws;
63 int intrg_max_tidaws;
64};
65
66/**
67 * itcw_get_tcw - return pointer to tcw associated with the itcw
68 * @itcw: address of the itcw
69 *
70 * Return pointer to the tcw associated with the itcw.
71 */
72struct tcw *itcw_get_tcw(struct itcw *itcw)
73{
74 return itcw->tcw;
75}
76EXPORT_SYMBOL(itcw_get_tcw);
77
78/**
79 * itcw_calc_size - return the size of an itcw with the given parameters
80 * @intrg: if non-zero, add an interrogate tcw
81 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
82 * if no tida is to be used.
83 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
84 * by the interrogate tcw, if specified
85 *
86 * Calculate and return the number of bytes required to hold an itcw with the
87 * given parameters and assuming tccbs with maximum size.
88 *
89 * Note that the resulting size also contains bytes needed for alignment
90 * padding as well as padding to ensure that data structures don't cross a
91 * 4k-boundary where required.
92 */
93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
94{
95 size_t len;
96
97 /* Main data. */
98 len = sizeof(struct itcw);
99 len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
100 /* TSB */ sizeof(struct tsb) +
101 /* TIDAL */ max_tidaws * sizeof(struct tidaw);
102 /* Interrogate data. */
103 if (intrg) {
104 len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
105 /* TSB */ sizeof(struct tsb) +
106 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
107 }
108 /* Maximum required alignment padding. */
109 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
110 /* Maximum padding for structures that may not cross 4k boundary. */
111 if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
112 len += max(max_tidaws, intrg_max_tidaws) *
113 sizeof(struct tidaw) - 1;
114 return len;
115}
116EXPORT_SYMBOL(itcw_calc_size);
117
118#define CROSS4K(x, l) (((x) & ~4095) != (((x) + (l)) & ~4095))
119
120static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
121 int align, int check_4k)
122{
123 addr_t addr;
124
125 addr = ALIGN(*start, align);
126 if (check_4k && CROSS4K(addr, len)) {
127 addr = ALIGN(addr, 4096);
128 addr = ALIGN(addr, align);
129 }
130 if (addr + len > end)
131 return ERR_PTR(-ENOSPC);
132 *start = addr + len;
133 return (void *) addr;
134}
135
136/**
137 * itcw_init - initialize incremental tcw data structure
138 * @buffer: address of buffer to use for data structures
139 * @size: number of bytes in buffer
140 * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
141 * operation tcw
142 * @intrg: if non-zero, add and initialize an interrogate tcw
143 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
144 * if no tida is to be used.
145 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
146 * by the interrogate tcw, if specified
147 *
148 * Prepare the specified buffer to be used as an incremental tcw, i.e. a
149 * helper data structure that can be used to construct a valid tcw by
150 * successive calls to other helper functions. Note: the buffer needs to be
151 * located below the 2G address limit. The resulting tcw has the following
152 * restrictions:
153 * - no tccb tidal
154 * - input/output tidal is contiguous (no ttic)
155 * - total data should not exceed 4k
156 * - tcw specifies either read or write operation
157 *
158 * On success, return pointer to the resulting incremental tcw data structure,
159 * ERR_PTR otherwise.
160 */
161struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
162 int max_tidaws, int intrg_max_tidaws)
163{
164 struct itcw *itcw;
165 void *chunk;
166 addr_t start;
167 addr_t end;
168
169 /* Check for 2G limit. */
170 start = (addr_t) buffer;
171 end = start + size;
172 if (end > (1UL << 31))
173 return ERR_PTR(-EINVAL);
174 memset(buffer, 0, size);
175 /* ITCW. */
176 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
177 if (IS_ERR(chunk))
178 return chunk;
179 itcw = chunk;
180 itcw->max_tidaws = max_tidaws;
181 itcw->intrg_max_tidaws = intrg_max_tidaws;
182 /* Main TCW. */
183 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
184 if (IS_ERR(chunk))
185 return chunk;
186 itcw->tcw = chunk;
187 tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
188 (op == ITCW_OP_WRITE) ? 1 : 0);
189 /* Interrogate TCW. */
190 if (intrg) {
191 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
192 if (IS_ERR(chunk))
193 return chunk;
194 itcw->intrg_tcw = chunk;
195 tcw_init(itcw->intrg_tcw, 1, 0);
196 tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
197 }
198 /* Data TIDAL. */
199 if (max_tidaws > 0) {
200 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
201 max_tidaws, 16, 1);
202 if (IS_ERR(chunk))
203 return chunk;
204 tcw_set_data(itcw->tcw, chunk, 1);
205 }
206 /* Interrogate data TIDAL. */
207 if (intrg && (intrg_max_tidaws > 0)) {
208 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
209 intrg_max_tidaws, 16, 1);
210 if (IS_ERR(chunk))
211 return chunk;
212 tcw_set_data(itcw->intrg_tcw, chunk, 1);
213 }
214 /* TSB. */
215 chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
216 if (IS_ERR(chunk))
217 return chunk;
218 tsb_init(chunk);
219 tcw_set_tsb(itcw->tcw, chunk);
220 /* Interrogate TSB. */
221 if (intrg) {
222 chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
223 if (IS_ERR(chunk))
224 return chunk;
225 tsb_init(chunk);
226 tcw_set_tsb(itcw->intrg_tcw, chunk);
227 }
228 /* TCCB. */
229 chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
230 if (IS_ERR(chunk))
231 return chunk;
232 tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
233 tcw_set_tccb(itcw->tcw, chunk);
234 /* Interrogate TCCB. */
235 if (intrg) {
236 chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
237 if (IS_ERR(chunk))
238 return chunk;
239 tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
240 tcw_set_tccb(itcw->intrg_tcw, chunk);
241 tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
242 sizeof(struct dcw_intrg_data), 0);
243 tcw_finalize(itcw->intrg_tcw, 0);
244 }
245 return itcw;
246}
247EXPORT_SYMBOL(itcw_init);
248
249/**
250 * itcw_add_dcw - add a dcw to the itcw
251 * @itcw: address of the itcw
252 * @cmd: the dcw command
253 * @flags: flags for the dcw
254 * @cd: address of control data for this dcw or NULL if none is required
255 * @cd_count: number of control data bytes for this dcw
256 * @count: number of data bytes for this dcw
257 *
258 * Add a new dcw to the specified itcw by writing the dcw information specified
259 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
260 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
261 * would exceed the available space.
262 *
263 * Note: the tcal field of the tccb header will be updated to reflect added
264 * content.
265 */
266struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
267 u8 cd_count, u32 count)
268{
269 return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
270 flags, cd, cd_count, count);
271}
272EXPORT_SYMBOL(itcw_add_dcw);
273
274/**
275 * itcw_add_tidaw - add a tidaw to the itcw
276 * @itcw: address of the itcw
277 * @flags: flags for the new tidaw
278 * @addr: address value for the new tidaw
279 * @count: count value for the new tidaw
280 *
281 * Add a new tidaw to the input/output data tidaw-list of the specified itcw
282 * (depending on the value of the r-flag and w-flag). Return a pointer to
283 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
284 * available space.
285 *
286 * Note: the tidaw-list is assumed to be contiguous with no ttics. The
287 * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
288 */
289struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
290{
291 if (itcw->num_tidaws >= itcw->max_tidaws)
292 return ERR_PTR(-ENOSPC);
293 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
294}
295EXPORT_SYMBOL(itcw_add_tidaw);
296
297/**
298 * itcw_set_data - set data address and tida flag of the itcw
299 * @itcw: address of the itcw
300 * @addr: the data address
301 * @use_tidal: zero if the data address specifies a contiguous block of data,
302 * non-zero if it specifies a list of tidaws.
303 *
304 * Set the input/output data address of the itcw (depending on the value of the
305 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
306 * is set as well.
307 */
308void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
309{
310 tcw_set_data(itcw->tcw, addr, use_tidal);
311}
312EXPORT_SYMBOL(itcw_set_data);
313
314/**
315 * itcw_finalize - calculate length and count fields of the itcw
316 * @itcw: address of the itcw
317 *
318 * Calculate tcw input-/output-count and tccbl fields and add a tcat to the tccb.
319 * In case input- or output-tida is used, the tidaw-list must be stored in
320 * contiguous storage (no ttic). The tcal field in the tccb must be
321 * up-to-date.
322 */
323void itcw_finalize(struct itcw *itcw)
324{
325 tcw_finalize(itcw->tcw, itcw->num_tidaws);
326}
327EXPORT_SYMBOL(itcw_finalize);
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 445cf364e461..2bf36e14b102 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -2082,7 +2082,6 @@ qdio_timeout_handler(struct ccw_device *cdev)
2082 default: 2082 default:
2083 BUG(); 2083 BUG();
2084 } 2084 }
2085 ccw_device_set_timeout(cdev, 0);
2086 wake_up(&cdev->private->wait_q); 2085 wake_up(&cdev->private->wait_q);
2087} 2086}
2088 2087
@@ -2121,6 +2120,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2121 case -EIO: 2120 case -EIO:
2122 QDIO_PRINT_ERR("i/o error on device %s\n", 2121 QDIO_PRINT_ERR("i/o error on device %s\n",
2123 cdev->dev.bus_id); 2122 cdev->dev.bus_id);
2123 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2124 wake_up(&cdev->private->wait_q);
2124 return; 2125 return;
2125 case -ETIMEDOUT: 2126 case -ETIMEDOUT:
2126 qdio_timeout_handler(cdev); 2127 qdio_timeout_handler(cdev);
@@ -2139,8 +2140,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2139 QDIO_DBF_TEXT4(0, trace, dbf_text); 2140 QDIO_DBF_TEXT4(0, trace, dbf_text);
2140#endif /* CONFIG_QDIO_DEBUG */ 2141#endif /* CONFIG_QDIO_DEBUG */
2141 2142
2142 cstat = irb->scsw.cstat; 2143 cstat = irb->scsw.cmd.cstat;
2143 dstat = irb->scsw.dstat; 2144 dstat = irb->scsw.cmd.dstat;
2144 2145
2145 switch (irq_ptr->state) { 2146 switch (irq_ptr->state) {
2146 case QDIO_IRQ_STATE_INACTIVE: 2147 case QDIO_IRQ_STATE_INACTIVE:
@@ -2353,9 +2354,6 @@ tiqdio_check_chsc_availability(void)
2353{ 2354{
2354 char dbf_text[15]; 2355 char dbf_text[15];
2355 2356
2356 if (!css_characteristics_avail)
2357 return -EIO;
2358
2359 /* Check for bit 41. */ 2357 /* Check for bit 41. */
2360 if (!css_general_characteristics.aif) { 2358 if (!css_general_characteristics.aif) {
2361 QDIO_PRINT_WARN("Adapter interruption facility not " \ 2359 QDIO_PRINT_WARN("Adapter interruption facility not " \
@@ -2667,12 +2665,12 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2667 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 2665 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2668 } else if (rc == 0) { 2666 } else if (rc == 0) {
2669 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); 2667 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2670 ccw_device_set_timeout(cdev, timeout);
2671 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); 2668 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2672 2669
2673 wait_event(cdev->private->wait_q, 2670 wait_event_interruptible_timeout(cdev->private->wait_q,
2674 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || 2671 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2675 irq_ptr->state == QDIO_IRQ_STATE_ERR); 2672 irq_ptr->state == QDIO_IRQ_STATE_ERR,
2673 timeout);
2676 } else { 2674 } else {
2677 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " 2675 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2678 "device %s\n", result, cdev->dev.bus_id); 2676 "device %s\n", result, cdev->dev.bus_id);
@@ -2692,7 +2690,6 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2692 2690
2693 /* Ignore errors. */ 2691 /* Ignore errors. */
2694 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 2692 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2695 ccw_device_set_timeout(cdev, 0);
2696out: 2693out:
2697 up(&irq_ptr->setting_up_sema); 2694 up(&irq_ptr->setting_up_sema);
2698 return result; 2695 return result;
@@ -2907,13 +2904,10 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2907 QDIO_DBF_TEXT0(0,setup,dbf_text); 2904 QDIO_DBF_TEXT0(0,setup,dbf_text);
2908 QDIO_DBF_TEXT0(0,trace,dbf_text); 2905 QDIO_DBF_TEXT0(0,trace,dbf_text);
2909 2906
2910 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { 2907 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat))
2911 ccw_device_set_timeout(cdev, 0);
2912 return; 2908 return;
2913 }
2914 2909
2915 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); 2910 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2916 ccw_device_set_timeout(cdev, 0);
2917} 2911}
2918 2912
2919int 2913int
@@ -3196,8 +3190,6 @@ qdio_establish(struct qdio_initialize *init_data)
3196 irq_ptr->schid.ssid, irq_ptr->schid.sch_no, 3190 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3197 result, result2); 3191 result, result2);
3198 result=result2; 3192 result=result2;
3199 if (result)
3200 ccw_device_set_timeout(cdev, 0);
3201 } 3193 }
3202 3194
3203 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); 3195 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
@@ -3279,7 +3271,6 @@ qdio_activate(struct ccw_device *cdev, int flags)
3279 3271
3280 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); 3272 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3281 3273
3282 ccw_device_set_timeout(cdev, 0);
3283 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); 3274 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3284 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, 3275 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3285 0, DOIO_DENY_PREFETCH); 3276 0, DOIO_DENY_PREFETCH);
@@ -3722,7 +3713,8 @@ tiqdio_register_thinints(void)
3722 char dbf_text[20]; 3713 char dbf_text[20];
3723 3714
3724 tiqdio_ind = 3715 tiqdio_ind =
3725 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL); 3716 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL,
3717 TIQDIO_THININT_ISC);
3726 if (IS_ERR(tiqdio_ind)) { 3718 if (IS_ERR(tiqdio_ind)) {
3727 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); 3719 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3728 QDIO_DBF_TEXT0(0,setup,dbf_text); 3720 QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -3738,7 +3730,8 @@ static void
3738tiqdio_unregister_thinints(void) 3730tiqdio_unregister_thinints(void)
3739{ 3731{
3740 if (tiqdio_ind) 3732 if (tiqdio_ind)
3741 s390_unregister_adapter_interrupt(tiqdio_ind); 3733 s390_unregister_adapter_interrupt(tiqdio_ind,
3734 TIQDIO_THININT_ISC);
3742} 3735}
3743 3736
3744static int 3737static int
@@ -3899,6 +3892,7 @@ init_QDIO(void)
3899 qdio_mempool_alloc, 3892 qdio_mempool_alloc,
3900 qdio_mempool_free, NULL); 3893 qdio_mempool_free, NULL);
3901 3894
3895 isc_register(QDIO_AIRQ_ISC);
3902 if (tiqdio_check_chsc_availability()) 3896 if (tiqdio_check_chsc_availability())
3903 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); 3897 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3904 3898
@@ -3911,6 +3905,7 @@ static void __exit
3911cleanup_QDIO(void) 3905cleanup_QDIO(void)
3912{ 3906{
3913 tiqdio_unregister_thinints(); 3907 tiqdio_unregister_thinints();
3908 isc_unregister(QDIO_AIRQ_ISC);
3914 qdio_remove_procfs_entry(); 3909 qdio_remove_procfs_entry();
3915 qdio_release_qdio_memory(); 3910 qdio_release_qdio_memory();
3916 qdio_unregister_dbf_views(); 3911 qdio_unregister_dbf_views();
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index c3df6b2c38b7..7656081a24d2 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -2,8 +2,8 @@
2#define _CIO_QDIO_H 2#define _CIO_QDIO_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5#include <asm/isc.h>
6#include "schid.h" 6#include <asm/schid.h>
7 7
8#ifdef CONFIG_QDIO_DEBUG 8#ifdef CONFIG_QDIO_DEBUG
9#define QDIO_VERBOSE_LEVEL 9 9#define QDIO_VERBOSE_LEVEL 9
@@ -26,7 +26,7 @@
26 */ 26 */
27#define IQDIO_FILL_LEVEL_TO_POLL 4 27#define IQDIO_FILL_LEVEL_TO_POLL 4
28 28
29#define TIQDIO_THININT_ISC 3 29#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
30#define TIQDIO_DELAY_TARGET 0 30#define TIQDIO_DELAY_TARGET 0
31#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ 31#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
32#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ 32#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
deleted file mode 100644
index 54328fec5ade..000000000000
--- a/drivers/s390/cio/schid.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef S390_SCHID_H
2#define S390_SCHID_H
3
4struct subchannel_id {
5 __u32 reserved:13;
6 __u32 ssid:2;
7 __u32 one:1;
8 __u32 sch_no:16;
9} __attribute__ ((packed,aligned(4)));
10
11
12/* Helper function for sane state of pre-allocated subchannel_id. */
13static inline void
14init_subchannel_id(struct subchannel_id *schid)
15{
16 memset(schid, 0, sizeof(struct subchannel_id));
17 schid->one = 1;
18}
19
20static inline int
21schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
22{
23 return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
24}
25
26#endif /* S390_SCHID_H */
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c
new file mode 100644
index 000000000000..f8da25ab576d
--- /dev/null
+++ b/drivers/s390/cio/scsw.c
@@ -0,0 +1,843 @@
+/*
+ * Helper functions for scsw access.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/cio.h>
+#include "css.h"
+#include "chsc.h"
+
+/**
+ * scsw_is_tm - check for transport mode scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the specified scsw is a transport mode scsw, zero
+ * otherwise.
+ */
+int scsw_is_tm(union scsw *scsw)
+{
+	return css_general_characteristics.fcx && (scsw->tm.x == 1);
+}
+EXPORT_SYMBOL(scsw_is_tm);
+
+/**
+ * scsw_key - return scsw key field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the key field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_key(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.key;
+	else
+		return scsw->cmd.key;
+}
+EXPORT_SYMBOL(scsw_key);
+
+/**
+ * scsw_eswf - return scsw eswf field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the eswf field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_eswf(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.eswf;
+	else
+		return scsw->cmd.eswf;
+}
+EXPORT_SYMBOL(scsw_eswf);
+
+/**
+ * scsw_cc - return scsw cc field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the cc field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_cc(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.cc;
+	else
+		return scsw->cmd.cc;
+}
+EXPORT_SYMBOL(scsw_cc);
+
+/**
+ * scsw_ectl - return scsw ectl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the ectl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_ectl(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.ectl;
+	else
+		return scsw->cmd.ectl;
+}
+EXPORT_SYMBOL(scsw_ectl);
+
+/**
+ * scsw_pno - return scsw pno field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the pno field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_pno(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.pno;
+	else
+		return scsw->cmd.pno;
+}
+EXPORT_SYMBOL(scsw_pno);
+
+/**
+ * scsw_fctl - return scsw fctl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the fctl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_fctl(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.fctl;
+	else
+		return scsw->cmd.fctl;
+}
+EXPORT_SYMBOL(scsw_fctl);
+
+/**
+ * scsw_actl - return scsw actl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the actl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_actl(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.actl;
+	else
+		return scsw->cmd.actl;
+}
+EXPORT_SYMBOL(scsw_actl);
+
+/**
+ * scsw_stctl - return scsw stctl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the stctl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_stctl(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.stctl;
+	else
+		return scsw->cmd.stctl;
+}
+EXPORT_SYMBOL(scsw_stctl);
+
+/**
+ * scsw_dstat - return scsw dstat field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the dstat field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_dstat(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.dstat;
+	else
+		return scsw->cmd.dstat;
+}
+EXPORT_SYMBOL(scsw_dstat);
+
+/**
+ * scsw_cstat - return scsw cstat field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the cstat field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_cstat(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw->tm.cstat;
+	else
+		return scsw->cmd.cstat;
+}
+EXPORT_SYMBOL(scsw_cstat);
+
+/**
+ * scsw_cmd_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_key(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_key);
+
+/**
+ * scsw_cmd_is_valid_sctl - check sctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the sctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_sctl(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
+
+/**
+ * scsw_cmd_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_eswf(union scsw *scsw)
+{
+	return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
+
+/**
+ * scsw_cmd_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_cc(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
+	       (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
+
+/**
+ * scsw_cmd_is_valid_fmt - check fmt field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fmt field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_fmt(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
+
+/**
+ * scsw_cmd_is_valid_pfch - check pfch field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pfch field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_pfch(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
+
+/**
+ * scsw_cmd_is_valid_isic - check isic field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the isic field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_isic(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
+
+/**
+ * scsw_cmd_is_valid_alcc - check alcc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the alcc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_alcc(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
+
+/**
+ * scsw_cmd_is_valid_ssi - check ssi field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ssi field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_ssi(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
+
+/**
+ * scsw_cmd_is_valid_zcc - check zcc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the zcc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_zcc(union scsw *scsw)
+{
+	return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
+	       (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
+
+/**
+ * scsw_cmd_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_ectl(union scsw *scsw)
+{
+	return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+	       !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
+	       (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
+
+/**
+ * scsw_cmd_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_pno(union scsw *scsw)
+{
+	return (scsw->cmd.fctl != 0) &&
+	       (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+	       (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
+		((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
+		 (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
+
+/**
+ * scsw_cmd_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_fctl(union scsw *scsw)
+{
+	/* Only valid if pmcw.dnv == 1*/
+	return 1;
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
+
+/**
+ * scsw_cmd_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_actl(union scsw *scsw)
+{
+	/* Only valid if pmcw.dnv == 1*/
+	return 1;
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
+
+/**
+ * scsw_cmd_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_stctl(union scsw *scsw)
+{
+	/* Only valid if pmcw.dnv == 1*/
+	return 1;
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
+
+/**
+ * scsw_cmd_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_dstat(union scsw *scsw)
+{
+	return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+	       (scsw->cmd.cc != 3);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
+
+/**
+ * scsw_cmd_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_cstat(union scsw *scsw)
+{
+	return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+	       (scsw->cmd.cc != 3);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
+
+/**
+ * scsw_tm_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_key(union scsw *scsw)
+{
+	return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_key);
+
+/**
+ * scsw_tm_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_eswf(union scsw *scsw)
+{
+	return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
+
+/**
+ * scsw_tm_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_cc(union scsw *scsw)
+{
+	return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
+	       (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_cc);
+
+/**
+ * scsw_tm_is_valid_fmt - check fmt field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fmt field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_fmt(union scsw *scsw)
+{
+	return 1;
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
+
+/**
+ * scsw_tm_is_valid_x - check x field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the x field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_x(union scsw *scsw)
+{
+	return 1;
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_x);
+
+/**
+ * scsw_tm_is_valid_q - check q field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the q field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_q(union scsw *scsw)
+{
+	return 1;
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_q);
+
+/**
+ * scsw_tm_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_ectl(union scsw *scsw)
+{
+	return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+	       !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
+	       (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
+
+/**
+ * scsw_tm_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_pno(union scsw *scsw)
+{
+	return (scsw->tm.fctl != 0) &&
+	       (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+	       (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
+		((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
+		 (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_pno);
+
+/**
+ * scsw_tm_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_fctl(union scsw *scsw)
+{
+	/* Only valid if pmcw.dnv == 1*/
+	return 1;
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
+
+/**
+ * scsw_tm_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_actl(union scsw *scsw)
+{
+	/* Only valid if pmcw.dnv == 1*/
+	return 1;
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_actl);
+
+/**
+ * scsw_tm_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_stctl(union scsw *scsw)
+{
+	/* Only valid if pmcw.dnv == 1*/
+	return 1;
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
+
+/**
+ * scsw_tm_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_dstat(union scsw *scsw)
+{
+	return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+	       (scsw->tm.cc != 3);
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
+
+/**
+ * scsw_tm_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_cstat(union scsw *scsw)
+{
+	return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+	       (scsw->tm.cc != 3);
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
+
+/**
+ * scsw_tm_is_valid_fcxs - check fcxs field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fcxs field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_fcxs(union scsw *scsw)
+{
+	return 1;
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
+
+/**
+ * scsw_tm_is_valid_schxs - check schxs field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the schxs field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_tm_is_valid_schxs(union scsw *scsw)
+{
+	return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
+				  SCHN_STAT_INTF_CTRL_CHK |
+				  SCHN_STAT_PROT_CHECK |
+				  SCHN_STAT_CHN_DATA_CHK));
+}
+EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
+
+/**
+ * scsw_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_actl(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_actl(scsw);
+	else
+		return scsw_cmd_is_valid_actl(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_actl);
+
+/**
+ * scsw_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_cc(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_cc(scsw);
+	else
+		return scsw_cmd_is_valid_cc(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_cc);
+
+/**
+ * scsw_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_cstat(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_cstat(scsw);
+	else
+		return scsw_cmd_is_valid_cstat(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_cstat);
+
+/**
+ * scsw_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_dstat(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_dstat(scsw);
+	else
+		return scsw_cmd_is_valid_dstat(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_dstat);
+
+/**
+ * scsw_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_ectl(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_ectl(scsw);
+	else
+		return scsw_cmd_is_valid_ectl(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_ectl);
+
+/**
+ * scsw_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_eswf(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_eswf(scsw);
+	else
+		return scsw_cmd_is_valid_eswf(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_eswf);
+
+/**
+ * scsw_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_fctl(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_fctl(scsw);
+	else
+		return scsw_cmd_is_valid_fctl(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_fctl);
+
+/**
+ * scsw_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_key(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_key(scsw);
+	else
+		return scsw_cmd_is_valid_key(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_key);
+
+/**
+ * scsw_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_pno(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_pno(scsw);
+	else
+		return scsw_cmd_is_valid_pno(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_pno);
+
+/**
+ * scsw_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+int scsw_is_valid_stctl(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_valid_stctl(scsw);
+	else
+		return scsw_cmd_is_valid_stctl(scsw);
+}
+EXPORT_SYMBOL(scsw_is_valid_stctl);
+
+/**
+ * scsw_cmd_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the command mode scsw indicates that the associated
+ * status condition is solicited, zero if it is unsolicited.
+ */
+int scsw_cmd_is_solicited(union scsw *scsw)
+{
+	return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
+		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
+}
+EXPORT_SYMBOL(scsw_cmd_is_solicited);
+
+/**
+ * scsw_tm_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the transport mode scsw indicates that the associated
+ * status condition is solicited, zero if it is unsolicited.
+ */
+int scsw_tm_is_solicited(union scsw *scsw)
+{
+	return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
+		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
+}
+EXPORT_SYMBOL(scsw_tm_is_solicited);
+
+/**
+ * scsw_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the transport or command mode scsw indicates that the
+ * associated status condition is solicited, zero if it is unsolicited.
+ */
+int scsw_is_solicited(union scsw *scsw)
+{
+	if (scsw_is_tm(scsw))
+		return scsw_tm_is_solicited(scsw);
+	else
+		return scsw_cmd_is_solicited(scsw);
+}
+EXPORT_SYMBOL(scsw_is_solicited);
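
A hedged sketch of how a caller consumes the new accessors: gate on the matching *_is_valid_* predicate before trusting a field. The union layout below is deliberately abbreviated for illustration; the real definitions live in <asm/cio.h>, and the real mode test also consults the FCX facility bit.

#include <stdio.h>

#define SCSW_STCTL_STATUS_PEND 0x01	/* assumed encoding for this sketch */

/* Abbreviated overlay; bitfield packing is compiler-dependent, which is
 * acceptable for an illustration but not for the real driver code. */
union scsw {
	struct { unsigned int x:1; unsigned int cc:2;
		 unsigned int stctl:5; unsigned int dstat:8; } tm;
	struct { unsigned int cc:2; unsigned int stctl:5;
		 unsigned int dstat:8; } cmd;
};

static int scsw_is_tm(union scsw *scsw)
{
	return scsw->tm.x;	/* the kernel version also checks css FCX support */
}

static unsigned int scsw_dstat(union scsw *scsw)
{
	return scsw_is_tm(scsw) ? scsw->tm.dstat : scsw->cmd.dstat;
}

static int scsw_is_valid_dstat(union scsw *scsw)
{
	unsigned int stctl = scsw_is_tm(scsw) ? scsw->tm.stctl : scsw->cmd.stctl;
	unsigned int cc    = scsw_is_tm(scsw) ? scsw->tm.cc    : scsw->cmd.cc;

	return (stctl & SCSW_STCTL_STATUS_PEND) && cc != 3;
}

int main(void)
{
	union scsw s = { .cmd = { .cc = 0, .stctl = SCSW_STCTL_STATUS_PEND,
				  .dstat = 0x0C } };

	if (scsw_is_valid_dstat(&s))	/* check validity before reading */
		printf("device status: %#x\n", scsw_dstat(&s));
	return 0;
}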
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a1ab3e3efd11..62b6b55230d0 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -34,13 +34,15 @@
 #include <linux/mutex.h>
 #include <asm/s390_rdev.h>
 #include <asm/reset.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
 
 #include "ap_bus.h"
 
 /* Some prototypes. */
 static void ap_scan_bus(struct work_struct *);
 static void ap_poll_all(unsigned long);
-static void ap_poll_timeout(unsigned long);
+static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
 static int ap_poll_thread_start(void);
 static void ap_poll_thread_stop(void);
 static void ap_request_timeout(unsigned long);
@@ -80,12 +82,15 @@ static DECLARE_WORK(ap_config_work, ap_scan_bus);
 /*
  * Tasklet & timer for AP request polling.
  */
-static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
 static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
 static atomic_t ap_poll_requests = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 static struct task_struct *ap_poll_kthread = NULL;
 static DEFINE_MUTEX(ap_poll_thread_mutex);
+static struct hrtimer ap_poll_timer;
+/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
+ * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
+static unsigned long long poll_timeout = 250000;
 
 /**
  * ap_intructions_available() - Test if AP instructions are available.
@@ -636,11 +641,39 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus,
 
 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
 
+static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+}
+
+static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
+				  size_t count)
+{
+	unsigned long long time;
+	ktime_t hr_time;
+
+	/* 120 seconds = maximum poll interval */
+	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000)
+		return -EINVAL;
+	poll_timeout = time;
+	hr_time = ktime_set(0, poll_timeout);
+
+	if (!hrtimer_is_queued(&ap_poll_timer) ||
+	    !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) {
+		ap_poll_timer.expires = hr_time;
+		hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS);
+	}
+	return count;
+}
+
+static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
+
 static struct bus_attribute *const ap_bus_attrs[] = {
 	&bus_attr_ap_domain,
 	&bus_attr_config_time,
 	&bus_attr_poll_thread,
-	NULL
+	&bus_attr_poll_timeout,
+	NULL,
 };
 
 /**
@@ -895,9 +928,10 @@ ap_config_timeout(unsigned long ptr)
  */
 static inline void ap_schedule_poll_timer(void)
 {
-	if (timer_pending(&ap_poll_timer))
+	if (hrtimer_is_queued(&ap_poll_timer))
 		return;
-	mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
+	hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout),
+		      HRTIMER_MODE_ABS);
 }
 
 /**
@@ -1115,13 +1149,14 @@ EXPORT_SYMBOL(ap_cancel_message);
 
 /**
  * ap_poll_timeout(): AP receive polling for finished AP requests.
- * @unused: Unused variable.
+ * @unused: Unused pointer.
  *
- * Schedules the AP tasklet.
+ * Schedules the AP tasklet using a high resolution timer.
  */
-static void ap_poll_timeout(unsigned long unused)
+static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
 {
 	tasklet_schedule(&ap_tasklet);
+	return HRTIMER_NORESTART;
 }
 
 /**
@@ -1344,6 +1379,14 @@ int __init ap_module_init(void)
 	ap_config_timer.expires = jiffies + ap_config_time * HZ;
 	add_timer(&ap_config_timer);
 
+	/* Setup the high resolution poll timer.
+	 * If we are running under z/VM adjust polling to z/VM polling rate.
+	 */
+	if (MACHINE_IS_VM)
+		poll_timeout = 1500000;
+	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	ap_poll_timer.function = ap_poll_timeout;
+
 	/* Start the low priority AP bus poll thread. */
 	if (ap_thread_flag) {
 		rc = ap_poll_thread_start();
@@ -1355,7 +1398,7 @@ int __init ap_module_init(void)
 
 out_work:
 	del_timer_sync(&ap_config_timer);
-	del_timer_sync(&ap_poll_timer);
+	hrtimer_cancel(&ap_poll_timer);
 	destroy_workqueue(ap_work_queue);
 out_root:
 	s390_root_dev_unregister(ap_root_device);
@@ -1386,7 +1429,7 @@ void ap_module_exit(void)
 	ap_reset_domain();
 	ap_poll_thread_stop();
 	del_timer_sync(&ap_config_timer);
-	del_timer_sync(&ap_poll_timer);
+	hrtimer_cancel(&ap_poll_timer);
 	destroy_workqueue(ap_work_queue);
 	tasklet_kill(&ap_tasklet);
 	s390_root_dev_unregister(ap_root_device);
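
The timer_list-to-hrtimer move follows a fixed recipe: init with a clock and mode, assign a callback that returns enum hrtimer_restart, arm with a ktime_t, and cancel with hrtimer_cancel() instead of del_timer_sync(). A minimal module-shaped sketch of that recipe, matching the 2008-era hrtimer API the patch uses; the names are illustrative and it compiles only in-tree:

/* Sketch, not the driver: shows the hrtimer lifecycle in isolation. */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer poll_timer;
static unsigned long long poll_ns = 250000;	/* 4 kHz, the LPAR default above */

static enum hrtimer_restart poll_fn(struct hrtimer *unused)
{
	/* kick the real work here (the driver schedules a tasklet) */
	return HRTIMER_NORESTART;	/* one-shot; rearmed on demand */
}

static int __init sketch_init(void)
{
	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	poll_timer.function = poll_fn;
	/* mode and expiry value mirror the patch's ap_schedule_poll_timer() */
	hrtimer_start(&poll_timer, ktime_set(0, poll_ns), HRTIMER_MODE_ABS);
	return 0;
}

static void __exit sketch_exit(void)
{
	hrtimer_cancel(&poll_timer);	/* replaces del_timer_sync() */
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

The new interval is also runtime-tunable through the poll_timeout bus attribute added above (a write in nanoseconds, capped at 120 seconds).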
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index c1e1200c43fc..446378b308fc 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -92,6 +92,8 @@ struct ap_queue_status {
 #define AP_DEVICE_TYPE_PCIXCC	5
 #define AP_DEVICE_TYPE_CEX2A	6
 #define AP_DEVICE_TYPE_CEX2C	7
+#define AP_DEVICE_TYPE_CEX2A2	8
+#define AP_DEVICE_TYPE_CEX2C2	9
 
 /*
  * AP reset flag states
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index e41c2fa86d8b..cb22b97944b8 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1071,10 +1071,8 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
 
 #define LBUFSIZE 1200UL
 	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
-	if (!lbuf) {
-		PRINTK("kmalloc failed!\n");
+	if (!lbuf)
 		return 0;
-	}
 
 	local_count = min(LBUFSIZE - 1, count);
 	if (copy_from_user(lbuf, buffer, local_count) != 0) {
@@ -1084,23 +1082,15 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
 	lbuf[local_count] = '\0';
 
 	ptr = strstr(lbuf, "Online devices");
-	if (!ptr) {
-		PRINTK("Unable to parse data (missing \"Online devices\")\n");
+	if (!ptr)
 		goto out;
-	}
 	ptr = strstr(ptr, "\n");
-	if (!ptr) {
-		PRINTK("Unable to parse data (missing newline "
-		       "after \"Online devices\")\n");
+	if (!ptr)
 		goto out;
-	}
 	ptr++;
 
-	if (strstr(ptr, "Waiting work element counts") == NULL) {
-		PRINTK("Unable to parse data (missing "
-		       "\"Waiting work element counts\")\n");
+	if (strstr(ptr, "Waiting work element counts") == NULL)
 		goto out;
-	}
 
 	for (j = 0; j < 64 && *ptr; ptr++) {
 		/*
@@ -1200,16 +1190,12 @@ int __init zcrypt_api_init(void)
 
 	/* Register the request sprayer. */
 	rc = misc_register(&zcrypt_misc_device);
-	if (rc < 0) {
-		PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
-			zcrypt_misc_device.minor, rc);
+	if (rc < 0)
 		goto out;
-	}
 
 	/* Set up the proc file system */
 	zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
 	if (!zcrypt_entry) {
-		PRINTK("Couldn't create z90crypt proc entry\n");
 		rc = -ENOMEM;
 		goto out_misc;
 	}
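
The deletions above remove the driver's private PRINTK/PDEBUG family outright rather than converting it. Purely as an illustration (not what this patch does), the direction mainline later took for such tracing is pr_debug() with a file-wide pr_fmt prefix, roughly:

/* Hypothetical replacement sketch, not part of this patch. */
#define pr_fmt(fmt) "zcrypt: %s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* compiled out unless DEBUG or dynamic debug enables it */
	pr_debug("misc_register failed, rc=%d\n", -19);
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");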
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 5c6e222b2ac4..1d1ec74dadb2 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -30,34 +30,6 @@
 #ifndef _ZCRYPT_API_H_
 #define _ZCRYPT_API_H_
 
-/**
- * Macro definitions
- *
- * PDEBUG debugs in the form "zcrypt: function_name -> message"
- *
- * PRINTK is like PDEBUG, except that it is always enabled
- * PRINTKN is like PRINTK, except that it does not include the function name
- * PRINTKW is like PRINTK, except that it uses KERN_WARNING
- * PRINTKC is like PRINTK, except that it uses KERN_CRIT
- */
-#define DEV_NAME	"zcrypt"
-
-#define PRINTK(fmt, args...) \
-	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
-#define PRINTKN(fmt, args...) \
-	printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
-#define PRINTKW(fmt, args...) \
-	printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
-#define PRINTKC(fmt, args...) \
-	printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)
-
-#ifdef ZCRYPT_DEBUG
-#define PDEBUG(fmt, args...) \
-	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
-#else
-#define PDEBUG(fmt, args...) do {} while (0)
-#endif
-
 #include "ap_bus.h"
 #include <asm/zcrypt.h>
 
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 08657f604b8c..54f4cbc3be9e 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -49,6 +49,7 @@
 
 static struct ap_device_id zcrypt_cex2a_ids[] = {
 	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
 	{ /* end of list */ },
 };
 
@@ -242,9 +243,6 @@ static int convert_response(struct zcrypt_device *zdev,
 		return convert_type80(zdev, reply,
 				      outputdata, outputdatalength);
 	default: /* Unknown response type, this should NEVER EVER happen */
-		PRINTK("Unrecognized Message Header: %08x%08x\n",
-		       *(unsigned int *) reply->message,
-		       *(unsigned int *) (reply->message+4));
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 3e27fe77d207..03ba27f05f92 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -92,10 +92,6 @@ static inline int convert_error(struct zcrypt_device *zdev,
 {
 	struct error_hdr *ehdr = reply->message;
 
-	PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n",
-	       ehdr->type, *(unsigned int *) reply->message,
-	       *(unsigned int *) (reply->message + 4));
-
 	switch (ehdr->reply_code) {
 	case REP82_ERROR_OPERAND_INVALID:
 	case REP82_ERROR_OPERAND_SIZE:
@@ -123,8 +119,6 @@ static inline int convert_error(struct zcrypt_device *zdev,
 		zdev->online = 0;
 		return -EAGAIN;
 	default:
-		PRINTKW("unknown type %02x reply code = %d\n",
-			ehdr->type, ehdr->reply_code);
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 6e93b4751782..12da4815ba8e 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -226,9 +226,6 @@ static int convert_response(struct zcrypt_device *zdev,
 		return convert_type84(zdev, reply,
 				      outputdata, outputdatalength);
 	default: /* Unknown response type, this should NEVER EVER happen */
-		PRINTK("Unrecognized Message Header: %08x%08x\n",
-		       *(unsigned int *) reply->message,
-		       *(unsigned int *) (reply->message+4));
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 17ea56ce1c11..779952cb19fc 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -361,26 +361,18 @@ static int convert_type86(struct zcrypt_device *zdev,
 	service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
 	if (unlikely(service_rc != 0)) {
 		service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
-		if (service_rc == 8 && service_rs == 66) {
-			PDEBUG("Bad block format on PCICC\n");
+		if (service_rc == 8 && service_rs == 66)
 			return -EINVAL;
-		}
-		if (service_rc == 8 && service_rs == 65) {
-			PDEBUG("Probably an even modulus on PCICC\n");
+		if (service_rc == 8 && service_rs == 65)
 			return -EINVAL;
-		}
 		if (service_rc == 8 && service_rs == 770) {
-			PDEBUG("Invalid key length on PCICC\n");
 			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
 			return -EAGAIN;
 		}
 		if (service_rc == 8 && service_rs == 783) {
-			PDEBUG("Extended bitlengths not enabled on PCICC\n");
 			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
 			return -EAGAIN;
 		}
-		PRINTK("Unknown service rc/rs (PCICC): %d/%d\n",
-		       service_rc, service_rs);
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
@@ -434,9 +426,6 @@ static int convert_response(struct zcrypt_device *zdev,
 				       outputdata, outputdatalength);
 	/* no break, incorrect cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
-		PRINTK("Unrecognized Message Header: %08x%08x\n",
-		       *(unsigned int *) reply->message,
-		       *(unsigned int *) (reply->message+4));
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 0bc9b3188e64..d8ad36f81540 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -72,6 +72,7 @@ struct response_type {
 static struct ap_device_id zcrypt_pcixcc_ids[] = {
 	{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
 	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
 	{ /* end of list */ },
 };
 
@@ -289,38 +290,19 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
 	ap_msg->length = sizeof(struct type6_hdr) +
 		CEIL4(xcRB->request_control_blk_length) +
 		xcRB->request_data_length;
-	if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) {
-		PRINTK("Combined message is too large (%ld/%d/%d).\n",
-		       sizeof(struct type6_hdr),
-		       xcRB->request_control_blk_length,
-		       xcRB->request_data_length);
+	if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
 		return -EFAULT;
-	}
-	if (CEIL4(xcRB->reply_control_blk_length) >
-	    PCIXCC_MAX_XCRB_REPLY_SIZE) {
-		PDEBUG("Reply CPRB length is too large (%d).\n",
-		       xcRB->request_control_blk_length);
+	if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
 		return -EFAULT;
-	}
-	if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) {
-		PDEBUG("Reply data block length is too large (%d).\n",
-		       xcRB->reply_data_length);
+	if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
 		return -EFAULT;
-	}
 	replylen = CEIL4(xcRB->reply_control_blk_length) +
 		CEIL4(xcRB->reply_data_length) +
 		sizeof(struct type86_fmt2_msg);
 	if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
-		PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE"
-		       " (%d/%d/%d).\n",
-		       sizeof(struct type86_fmt2_msg),
-		       xcRB->reply_control_blk_length,
-		       xcRB->reply_data_length);
 		xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
 			(sizeof(struct type86_fmt2_msg) +
 			CEIL4(xcRB->reply_data_length));
-		PDEBUG("Capping Reply CPRB length at %d\n",
-		       xcRB->reply_control_blk_length);
 	}
 
 	/* prepare type6 header */
@@ -339,11 +321,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
 			   xcRB->request_control_blk_length))
 		return -EFAULT;
 	if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
-	    xcRB->request_control_blk_length) {
-		PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len,
-		       xcRB->request_control_blk_length);
+	    xcRB->request_control_blk_length)
 		return -EFAULT;
-	}
 	function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
 	memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
 
@@ -471,29 +450,18 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
 	service_rc = msg->cprbx.ccp_rtcode;
 	if (unlikely(service_rc != 0)) {
 		service_rs = msg->cprbx.ccp_rscode;
-		if (service_rc == 8 && service_rs == 66) {
-			PDEBUG("Bad block format on PCIXCC/CEX2C\n");
+		if (service_rc == 8 && service_rs == 66)
 			return -EINVAL;
-		}
-		if (service_rc == 8 && service_rs == 65) {
-			PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n");
+		if (service_rc == 8 && service_rs == 65)
 			return -EINVAL;
-		}
-		if (service_rc == 8 && service_rs == 770) {
-			PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
+		if (service_rc == 8 && service_rs == 770)
 			return -EINVAL;
-		}
 		if (service_rc == 8 && service_rs == 783) {
-			PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
 			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
 			return -EAGAIN;
 		}
-		if (service_rc == 12 && service_rs == 769) {
-			PDEBUG("Invalid key on PCIXCC/CEX2C\n");
+		if (service_rc == 12 && service_rs == 769)
 			return -EINVAL;
-		}
-		PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
-		       service_rc, service_rs);
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
@@ -569,11 +537,8 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
 	} __attribute__((packed)) *msg = reply->message;
 	char *data = reply->message;
 
-	if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) {
-		PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
-		       rc, rs);
+	if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
 		return -EINVAL;
-	}
 	memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
 	return msg->fmt2.count2;
 }
@@ -598,9 +563,6 @@ static int convert_response_ica(struct zcrypt_device *zdev,
 				       outputdata, outputdatalength);
 	/* no break, incorrect cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
-		PRINTK("Unrecognized Message Header: %08x%08x\n",
-		       *(unsigned int *) reply->message,
-		       *(unsigned int *) (reply->message+4));
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
@@ -627,9 +589,6 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
 		return convert_type86_xcrb(zdev, reply, xcRB);
 	/* no break, incorrect cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
-		PRINTK("Unrecognized Message Header: %08x%08x\n",
-		       *(unsigned int *) reply->message,
-		       *(unsigned int *) (reply->message+4));
 		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
@@ -653,9 +612,6 @@ static int convert_response_rng(struct zcrypt_device *zdev,
 		return convert_type86_rng(zdev, reply, data);
 	/* no break, incorrect cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
-		PRINTK("Unrecognized Message Header: %08x%08x\n",
-		       *(unsigned int *) reply->message,
-		       *(unsigned int *) (reply->message+4));
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
@@ -700,10 +656,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
 			memcpy(msg->message, reply->message, length);
 			break;
 		default:
-			PRINTK("Invalid internal response type: %i\n",
-			       resp_type->type);
-			memcpy(msg->message, &error_reply,
-			       sizeof error_reply);
+			memcpy(msg->message, &error_reply, sizeof error_reply);
 		}
 	} else
 		memcpy(msg->message, reply->message, sizeof error_reply);
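
The collapsed bounds checks above all lean on CEIL4(), which rounds a length up to the next multiple of four before comparing it against the fixed reply-buffer sizes. The macro is defined elsewhere in zcrypt_pcixcc.c; the definition below is the usual form and is assumed here, not quoted from the file:

#include <stdio.h>

/* Assumed definition for illustration; the driver defines CEIL4 itself. */
#define CEIL4(x) (((x) + 3) & ~3U)

int main(void)
{
	unsigned int lens[] = { 0, 1, 4, 5, 1021 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("CEIL4(%u) = %u\n", lens[i], CEIL4(lens[i]));
	return 0;
}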
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 04a1d7bf678c..c644669a75c2 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -703,7 +703,8 @@ claw_irq_handler(struct ccw_device *cdev,
 	if (!cdev->dev.driver_data) {
 		printk(KERN_WARNING "claw: unsolicited interrupt for device:"
 			"%s received c-%02x d-%02x\n",
-			cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat);
+			cdev->dev.bus_id, irb->scsw.cmd.cstat,
+			irb->scsw.cmd.dstat);
 #ifdef FUNCTRACE
 		printk(KERN_INFO "claw: %s() "
 			"exit on line %d\n",__func__,__LINE__);
@@ -732,22 +733,23 @@ claw_irq_handler(struct ccw_device *cdev,
 #ifdef IOTRACE
 	printk(KERN_INFO "%s: interrupt for device: %04x "
 		"received c-%02x d-%02x state-%02x\n",
-		dev->name, p_ch->devno, irb->scsw.cstat,
-		irb->scsw.dstat, p_ch->claw_state);
+		dev->name, p_ch->devno, irb->scsw.cmd.cstat,
+		irb->scsw.cmd.dstat, p_ch->claw_state);
 #endif
 
 	/* Copy interruption response block. */
 	memcpy(p_ch->irb, irb, sizeof(struct irb));
 
 	/* Check for good subchannel return code, otherwise error message */
-	if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) {
+	if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
 		printk(KERN_INFO "%s: subchannel check for device: %04x -"
 			" Sch Stat %02x Dev Stat %02x CPA - %04x\n",
 			dev->name, p_ch->devno,
-			irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa);
+			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+			irb->scsw.cmd.cpa);
 #ifdef IOTRACE
 		dumpit((char *)irb,sizeof(struct irb));
-		dumpit((char *)(unsigned long)irb->scsw.cpa,
+		dumpit((char *)(unsigned long)irb->scsw.cmd.cpa,
 			sizeof(struct ccw1));
 #endif
 #ifdef FUNCTRACE
@@ -759,22 +761,24 @@ claw_irq_handler(struct ccw_device *cdev,
 	}
 
 	/* Check the reason-code of a unit check */
-	if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
 		ccw_check_unit_check(p_ch, irb->ecw[0]);
-	}
 
 	/* State machine to bring the connection up, down and to restart */
-	p_ch->last_dstat = irb->scsw.dstat;
+	p_ch->last_dstat = irb->scsw.cmd.dstat;
 
 	switch (p_ch->claw_state) {
 	case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
 #ifdef DEBUGMSG
 		printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name);
 #endif
-		if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
-		(p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
-		(p_ch->irb->scsw.stctl ==
-		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+		if (!((p_ch->irb->scsw.cmd.stctl &
+			SCSW_STCTL_SEC_STATUS) ||
+		     (p_ch->irb->scsw.cmd.stctl ==
+			SCSW_STCTL_STATUS_PEND) ||
+		     (p_ch->irb->scsw.cmd.stctl ==
+			(SCSW_STCTL_ALERT_STATUS |
+			 SCSW_STCTL_STATUS_PEND)))) {
 #ifdef FUNCTRACE
 			printk(KERN_INFO "%s:%s Exit on line %d\n",
 				dev->name,__func__,__LINE__);
@@ -798,10 +802,13 @@ claw_irq_handler(struct ccw_device *cdev,
 		printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n",
 			dev->name);
 #endif
-		if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
-		(p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
-		(p_ch->irb->scsw.stctl ==
-		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+		if (!((p_ch->irb->scsw.cmd.stctl &
+			SCSW_STCTL_SEC_STATUS) ||
+		     (p_ch->irb->scsw.cmd.stctl ==
+			SCSW_STCTL_STATUS_PEND) ||
+		     (p_ch->irb->scsw.cmd.stctl ==
+			(SCSW_STCTL_ALERT_STATUS |
+			 SCSW_STCTL_STATUS_PEND)))) {
 #ifdef FUNCTRACE
 			printk(KERN_INFO "%s:%s Exit on line %d\n",
 				dev->name,__func__,__LINE__);
@@ -828,8 +835,8 @@ claw_irq_handler(struct ccw_device *cdev,
828 "interrupt for device:" 835 "interrupt for device:"
829 "%s received c-%02x d-%02x\n", 836 "%s received c-%02x d-%02x\n",
830 cdev->dev.bus_id, 837 cdev->dev.bus_id,
831 irb->scsw.cstat, 838 irb->scsw.cmd.cstat,
832 irb->scsw.dstat); 839 irb->scsw.cmd.dstat);
833 return; 840 return;
834 } 841 }
835#ifdef DEBUGMSG 842#ifdef DEBUGMSG
@@ -844,7 +851,7 @@ claw_irq_handler(struct ccw_device *cdev,
 		return;
 	case CLAW_START_READ:
 		CLAW_DBF_TEXT(4,trace,"ReadIRQ");
-		if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 			clear_bit(0, (void *)&p_ch->IO_active);
 			if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
 			    (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
@@ -863,8 +870,8 @@ claw_irq_handler(struct ccw_device *cdev,
 			CLAW_DBF_TEXT(4,trace,"notrdy");
 			return;
 		}
-		if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) &&
-		(p_ch->irb->scsw.dstat==0)) {
+		if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
+		    (p_ch->irb->scsw.cmd.dstat == 0)) {
 			if (test_and_set_bit(CLAW_BH_ACTIVE,
 				(void *)&p_ch->flag_a) == 0) {
 				tasklet_schedule(&p_ch->tasklet);
@@ -879,10 +886,13 @@ claw_irq_handler(struct ccw_device *cdev,
 			CLAW_DBF_TEXT(4,trace,"PCI_read");
 			return;
 		}
-		if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
-		(p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
-		(p_ch->irb->scsw.stctl ==
-		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+		if (!((p_ch->irb->scsw.cmd.stctl &
+			SCSW_STCTL_SEC_STATUS) ||
+		     (p_ch->irb->scsw.cmd.stctl ==
+			SCSW_STCTL_STATUS_PEND) ||
+		     (p_ch->irb->scsw.cmd.stctl ==
+			(SCSW_STCTL_ALERT_STATUS |
+			 SCSW_STCTL_STATUS_PEND)))) {
 #ifdef FUNCTRACE
 			printk(KERN_INFO "%s:%s Exit on line %d\n",
 				dev->name,__func__,__LINE__);
@@ -911,7 +921,7 @@ claw_irq_handler(struct ccw_device *cdev,
911 CLAW_DBF_TEXT(4,trace,"RdIRQXit"); 921 CLAW_DBF_TEXT(4,trace,"RdIRQXit");
912 return; 922 return;
913 case CLAW_START_WRITE: 923 case CLAW_START_WRITE:
914 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 924 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 915 printk(KERN_INFO "%s: Unit Check Occurred in " 925 printk(KERN_INFO "%s: Unit Check Occurred in "
916 "write channel\n",dev->name); 926 "write channel\n",dev->name);
917 clear_bit(0, (void *)&p_ch->IO_active); 927 clear_bit(0, (void *)&p_ch->IO_active);
@@ -934,16 +944,19 @@ claw_irq_handler(struct ccw_device *cdev,
934 CLAW_DBF_TEXT(4,trace,"rstrtwrt"); 944 CLAW_DBF_TEXT(4,trace,"rstrtwrt");
935 return; 945 return;
936 } 946 }
937 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { 947 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
938 clear_bit(0, (void *)&p_ch->IO_active); 948 clear_bit(0, (void *)&p_ch->IO_active);
939 printk(KERN_INFO "%s: Unit Exception " 949 printk(KERN_INFO "%s: Unit Exception "
940 "Occured in write channel\n", 950 "Occured in write channel\n",
941 dev->name); 951 dev->name);
942 } 952 }
943 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 953 if (!((p_ch->irb->scsw.cmd.stctl &
944 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 954 SCSW_STCTL_SEC_STATUS) ||
945 (p_ch->irb->scsw.stctl == 955 (p_ch->irb->scsw.cmd.stctl ==
946 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 956 SCSW_STCTL_STATUS_PEND) ||
957 (p_ch->irb->scsw.cmd.stctl ==
958 (SCSW_STCTL_ALERT_STATUS |
959 SCSW_STCTL_STATUS_PEND)))) {
947#ifdef FUNCTRACE 960#ifdef FUNCTRACE
948 printk(KERN_INFO "%s:%s Exit on line %d\n", 961 printk(KERN_INFO "%s:%s Exit on line %d\n",
949 dev->name,__func__,__LINE__); 962 dev->name,__func__,__LINE__);
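
Note on the claw.c hunks above: the s390 common I/O layer turned the
subchannel-status word in struct irb into a union with a command-mode and
a transport-mode view, so classic channel drivers now read
irb->scsw.cmd.<field> instead of irb->scsw.<field>. The final-status test
that claw_irq_handler repeats in three places could be factored into a
helper; the sketch below is illustrative and not part of the patch:

        /* True when the scsw carries final/alert status, mirroring the
         * stctl test repeated in claw_irq_handler above. */
        static inline int claw_stctl_is_final(union scsw *scsw)
        {
                return (scsw->cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
                       (scsw->cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
                       (scsw->cmd.stctl ==
                        (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND));
        }
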
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 2a106f3a076d..7e6bd387f4d8 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -257,9 +257,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
257 if (duration > ch->prof.tx_time) 257 if (duration > ch->prof.tx_time)
258 ch->prof.tx_time = duration; 258 ch->prof.tx_time = duration;
259 259
260 if (ch->irb->scsw.count != 0) 260 if (ch->irb->scsw.cmd.count != 0)
261 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", 261 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
262 dev->name, ch->irb->scsw.count); 262 dev->name, ch->irb->scsw.cmd.count);
263 fsm_deltimer(&ch->timer); 263 fsm_deltimer(&ch->timer);
264 while ((skb = skb_dequeue(&ch->io_queue))) { 264 while ((skb = skb_dequeue(&ch->io_queue))) {
265 priv->stats.tx_packets++; 265 priv->stats.tx_packets++;
@@ -353,7 +353,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
353 struct channel *ch = arg; 353 struct channel *ch = arg;
354 struct net_device *dev = ch->netdev; 354 struct net_device *dev = ch->netdev;
355 struct ctcm_priv *priv = dev->priv; 355 struct ctcm_priv *priv = dev->priv;
356 int len = ch->max_bufsize - ch->irb->scsw.count; 356 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
357 struct sk_buff *skb = ch->trans_skb; 357 struct sk_buff *skb = ch->trans_skb;
358 __u16 block_len = *((__u16 *)skb->data); 358 __u16 block_len = *((__u16 *)skb->data);
359 int check_len; 359 int check_len;
@@ -1234,9 +1234,9 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1234 if (duration > ch->prof.tx_time) 1234 if (duration > ch->prof.tx_time)
1235 ch->prof.tx_time = duration; 1235 ch->prof.tx_time = duration;
1236 1236
1237 if (ch->irb->scsw.count != 0) 1237 if (ch->irb->scsw.cmd.count != 0)
1238 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", 1238 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
1239 dev->name, ch->irb->scsw.count); 1239 dev->name, ch->irb->scsw.cmd.count);
1240 fsm_deltimer(&ch->timer); 1240 fsm_deltimer(&ch->timer);
1241 while ((skb = skb_dequeue(&ch->io_queue))) { 1241 while ((skb = skb_dequeue(&ch->io_queue))) {
1242 priv->stats.tx_packets++; 1242 priv->stats.tx_packets++;
@@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1394 struct sk_buff *skb = ch->trans_skb; 1394 struct sk_buff *skb = ch->trans_skb;
1395 struct sk_buff *new_skb; 1395 struct sk_buff *new_skb;
1396 unsigned long saveflags = 0; /* avoids compiler warning */ 1396 unsigned long saveflags = 0; /* avoids compiler warning */
1397 int len = ch->max_bufsize - ch->irb->scsw.count; 1397 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
1398 1398
1399 if (do_debug_data) { 1399 if (do_debug_data) {
1400 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", 1400 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n",
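
Note on the ctcm_fsms.c hunks: scsw.cmd.count is the residual byte count
of the channel program, so the received length computed in chx_rx and
ctcmpc_chx_rx is the buffer size minus that residual. Condensed sketch of
the same arithmetic, with names following the driver:

        /* Residual-count arithmetic used by the rx handlers above. */
        int residual = ch->irb->scsw.cmd.count;   /* bytes NOT transferred */
        int len = ch->max_bufsize - residual;     /* bytes actually received */
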
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index d52843da4f55..6b13c1c1beb8 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1236,8 +1236,8 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1236 /* Check for unsolicited interrupts. */ 1236 /* Check for unsolicited interrupts. */
1237 if (cgdev == NULL) { 1237 if (cgdev == NULL) {
1238 ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", 1238 ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n",
1239 cdev->dev.bus_id, irb->scsw.cstat, 1239 cdev->dev.bus_id, irb->scsw.cmd.cstat,
1240 irb->scsw.dstat); 1240 irb->scsw.cmd.dstat);
1241 return; 1241 return;
1242 } 1242 }
1243 1243
@@ -1266,40 +1266,40 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1266 "received c-%02x d-%02x\n", 1266 "received c-%02x d-%02x\n",
1267 dev->name, 1267 dev->name,
1268 ch->id, 1268 ch->id,
1269 irb->scsw.cstat, 1269 irb->scsw.cmd.cstat,
1270 irb->scsw.dstat); 1270 irb->scsw.cmd.dstat);
1271 1271
1272 /* Copy interruption response block. */ 1272 /* Copy interruption response block. */
1273 memcpy(ch->irb, irb, sizeof(struct irb)); 1273 memcpy(ch->irb, irb, sizeof(struct irb));
1274 1274
1275 /* Check for good subchannel return code, otherwise error message */ 1275 /* Check for good subchannel return code, otherwise error message */
1276 if (irb->scsw.cstat) { 1276 if (irb->scsw.cmd.cstat) {
1277 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); 1277 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
1278 ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", 1278 ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n",
1279 dev->name, ch->id, irb->scsw.cstat, 1279 dev->name, ch->id, irb->scsw.cmd.cstat,
1280 irb->scsw.dstat); 1280 irb->scsw.cmd.dstat);
1281 return; 1281 return;
1282 } 1282 }
1283 1283
1284 /* Check the reason-code of a unit check */ 1284 /* Check the reason-code of a unit check */
1285 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 1285 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
1286 ccw_unit_check(ch, irb->ecw[0]); 1286 ccw_unit_check(ch, irb->ecw[0]);
1287 return; 1287 return;
1288 } 1288 }
1289 if (irb->scsw.dstat & DEV_STAT_BUSY) { 1289 if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
1290 if (irb->scsw.dstat & DEV_STAT_ATTENTION) 1290 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
1291 fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); 1291 fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
1292 else 1292 else
1293 fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); 1293 fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
1294 return; 1294 return;
1295 } 1295 }
1296 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 1296 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
1297 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); 1297 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
1298 return; 1298 return;
1299 } 1299 }
1300 if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 1300 if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
1301 (irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 1301 (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
1302 (irb->scsw.stctl == 1302 (irb->scsw.cmd.stctl ==
1303 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) 1303 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1304 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); 1304 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
1305 else 1305 else
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 8e7697305a4c..f4a32375c037 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -36,7 +36,6 @@ const char *cu3088_type[] = {
36 "CTC/A", 36 "CTC/A",
37 "ESCON channel", 37 "ESCON channel",
38 "FICON channel", 38 "FICON channel",
39 "P390 LCS card",
40 "OSA LCS card", 39 "OSA LCS card",
41 "CLAW channel device", 40 "CLAW channel device",
42 "unknown channel type", 41 "unknown channel type",
@@ -49,7 +48,6 @@ static struct ccw_device_id cu3088_ids[] = {
49 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, 48 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
50 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, 49 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
51 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, 50 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
52 { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
53 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, 51 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
54 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, 52 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
55 { /* end of list */ } 53 { /* end of list */ }
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
index 1753661f702a..d8558a7105a5 100644
--- a/drivers/s390/net/cu3088.h
+++ b/drivers/s390/net/cu3088.h
@@ -17,9 +17,6 @@ enum channel_types {
17 /* Device is a FICON channel */ 17 /* Device is a FICON channel */
18 channel_type_ficon, 18 channel_type_ficon,
19 19
20 /* Device is a P390 LCS card */
21 channel_type_p390,
22
23 /* Device is a OSA2 card */ 20 /* Device is a OSA2 card */
24 channel_type_osa2, 21 channel_type_osa2,
25 22
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index dd22f4b37037..6de28385b354 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1327,8 +1327,8 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1327 char *sense; 1327 char *sense;
1328 1328
1329 sense = (char *) irb->ecw; 1329 sense = (char *) irb->ecw;
1330 cstat = irb->scsw.cstat; 1330 cstat = irb->scsw.cmd.cstat;
1331 dstat = irb->scsw.dstat; 1331 dstat = irb->scsw.cmd.dstat;
1332 1332
1333 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 1333 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1334 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 1334 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
@@ -1388,11 +1388,13 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1388 else 1388 else
1389 channel = &card->write; 1389 channel = &card->write;
1390 1390
1391 cstat = irb->scsw.cstat; 1391 cstat = irb->scsw.cmd.cstat;
1392 dstat = irb->scsw.dstat; 1392 dstat = irb->scsw.cmd.dstat;
1393 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); 1393 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id);
1394 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat); 1394 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
1395 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl); 1395 irb->scsw.cmd.dstat);
1396 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
1397 irb->scsw.cmd.actl);
1396 1398
1397 /* Check for channel and device errors presented */ 1399 /* Check for channel and device errors presented */
1398 rc = lcs_get_problem(cdev, irb); 1400 rc = lcs_get_problem(cdev, irb);
@@ -1410,11 +1412,11 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1410 } 1412 }
1411 /* How far in the ccw chain have we processed? */ 1413 /* How far in the ccw chain have we processed? */
1412 if ((channel->state != LCS_CH_STATE_INIT) && 1414 if ((channel->state != LCS_CH_STATE_INIT) &&
1413 (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { 1415 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) {
1414 index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) 1416 index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
1415 - channel->ccws; 1417 - channel->ccws;
1416 if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || 1418 if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
1417 (irb->scsw.cstat & SCHN_STAT_PCI)) 1419 (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
1418 /* Bloody io subsystem tells us lies about cpa... */ 1420 /* Bloody io subsystem tells us lies about cpa... */
1419 index = (index - 1) & (LCS_NUM_BUFFS - 1); 1421 index = (index - 1) & (LCS_NUM_BUFFS - 1);
1420 while (channel->io_idx != index) { 1422 while (channel->io_idx != index) {
@@ -1425,25 +1427,24 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1425 } 1427 }
1426 } 1428 }
1427 1429
1428 if ((irb->scsw.dstat & DEV_STAT_DEV_END) || 1430 if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
1429 (irb->scsw.dstat & DEV_STAT_CHN_END) || 1431 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
1430 (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) 1432 (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
1431 /* Mark channel as stopped. */ 1433 /* Mark channel as stopped. */
1432 channel->state = LCS_CH_STATE_STOPPED; 1434 channel->state = LCS_CH_STATE_STOPPED;
1433 else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) 1435 else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
1434 /* CCW execution stopped on a suspend bit. */ 1436 /* CCW execution stopped on a suspend bit. */
1435 channel->state = LCS_CH_STATE_SUSPENDED; 1437 channel->state = LCS_CH_STATE_SUSPENDED;
1436 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { 1438 if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1437 if (irb->scsw.cc != 0) { 1439 if (irb->scsw.cmd.cc != 0) {
1438 ccw_device_halt(channel->ccwdev, (addr_t) channel); 1440 ccw_device_halt(channel->ccwdev, (addr_t) channel);
1439 return; 1441 return;
1440 } 1442 }
1441 /* The channel has been stopped by halt_IO. */ 1443 /* The channel has been stopped by halt_IO. */
1442 channel->state = LCS_CH_STATE_HALTED; 1444 channel->state = LCS_CH_STATE_HALTED;
1443 } 1445 }
1444 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1446 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
1445 channel->state = LCS_CH_STATE_CLEARED; 1447 channel->state = LCS_CH_STATE_CLEARED;
1446 }
1447 /* Do the rest in the tasklet. */ 1448 /* Do the rest in the tasklet. */
1448 tasklet_schedule(&channel->irq_tasklet); 1449 tasklet_schedule(&channel->irq_tasklet);
1449} 1450}
@@ -1761,7 +1762,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
1761 netif_carrier_off(card->dev); 1762 netif_carrier_off(card->dev);
1762 break; 1763 break;
1763 default: 1764 default:
1764 PRINT_INFO("UNRECOGNIZED LGW COMMAND\n"); 1765 LCS_DBF_TEXT(5, trace, "noLGWcmd");
1765 break; 1766 break;
1766 } 1767 }
1767 } else 1768 } else
@@ -2042,13 +2043,12 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
2042 LCS_DBF_TEXT(2, setup, "add_dev"); 2043 LCS_DBF_TEXT(2, setup, "add_dev");
2043 card = lcs_alloc_card(); 2044 card = lcs_alloc_card();
2044 if (!card) { 2045 if (!card) {
2045 PRINT_ERR("Allocation of lcs card failed\n"); 2046 LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM);
2046 put_device(&ccwgdev->dev); 2047 put_device(&ccwgdev->dev);
2047 return -ENOMEM; 2048 return -ENOMEM;
2048 } 2049 }
2049 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); 2050 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2050 if (ret) { 2051 if (ret) {
2051 PRINT_ERR("Creating attributes failed");
2052 lcs_free_card(card); 2052 lcs_free_card(card);
2053 put_device(&ccwgdev->dev); 2053 put_device(&ccwgdev->dev);
2054 return ret; 2054 return ret;
@@ -2140,7 +2140,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2140 default: 2140 default:
2141 LCS_DBF_TEXT(3, setup, "errinit"); 2141 LCS_DBF_TEXT(3, setup, "errinit");
2142 PRINT_ERR("LCS: Initialization failed\n"); 2142 PRINT_ERR("LCS: Initialization failed\n");
2143 PRINT_ERR("LCS: No device found!\n");
2144 goto out; 2143 goto out;
2145 } 2144 }
2146 if (!dev) 2145 if (!dev)
@@ -2269,7 +2268,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
2269 if (!card) 2268 if (!card)
2270 return; 2269 return;
2271 2270
2272 PRINT_INFO("Removing lcs group device ....\n");
2273 LCS_DBF_TEXT(3, setup, "remdev"); 2271 LCS_DBF_TEXT(3, setup, "remdev");
2274 LCS_DBF_HEX(3, setup, &card, sizeof(void*)); 2272 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2275 if (ccwgdev->state == CCWGROUP_ONLINE) { 2273 if (ccwgdev->state == CCWGROUP_ONLINE) {
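
Note on the lcs.c hunks: lcs_irq recovers how far the channel got in the
CCW chain from scsw.cmd.cpa, the address of the next CCW. Converting that
address back to a kernel pointer and subtracting the array base yields an
index into channel->ccws; when the program was suspended or a PCI was
presented, the cpa points one entry ahead and is stepped back. Sketch of
the same pointer arithmetic, condensed from the hunk above:

        /* cpa -> buffer index, as in lcs_irq above. */
        struct ccw1 *next = __va((addr_t) irb->scsw.cmd.cpa);
        int index = next - channel->ccws;            /* element index */
        if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
            (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
                index = (index - 1) & (LCS_NUM_BUFFS - 1);
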
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index e4ba6a0372ac..9242b5acc66b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -625,9 +625,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
625 offset += header->next; 625 offset += header->next;
626 header->next -= NETIUCV_HDRLEN; 626 header->next -= NETIUCV_HDRLEN;
627 if (skb_tailroom(pskb) < header->next) { 627 if (skb_tailroom(pskb) < header->next) {
628 PRINT_WARN("%s: Illegal next field in iucv header: "
629 "%d > %d\n",
630 dev->name, header->next, skb_tailroom(pskb));
631 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", 628 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
632 header->next, skb_tailroom(pskb)); 629 header->next, skb_tailroom(pskb));
633 return; 630 return;
@@ -636,8 +633,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
636 skb_reset_mac_header(pskb); 633 skb_reset_mac_header(pskb);
637 skb = dev_alloc_skb(pskb->len); 634 skb = dev_alloc_skb(pskb->len);
638 if (!skb) { 635 if (!skb) {
639 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
640 dev->name);
641 IUCV_DBF_TEXT(data, 2, 636 IUCV_DBF_TEXT(data, 2,
642 "Out of memory in netiucv_unpack_skb\n"); 637 "Out of memory in netiucv_unpack_skb\n");
643 privptr->stats.rx_dropped++; 638 privptr->stats.rx_dropped++;
@@ -674,7 +669,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
674 669
675 if (!conn->netdev) { 670 if (!conn->netdev) {
676 iucv_message_reject(conn->path, msg); 671 iucv_message_reject(conn->path, msg);
677 PRINT_WARN("Received data for unlinked connection\n");
678 IUCV_DBF_TEXT(data, 2, 672 IUCV_DBF_TEXT(data, 2,
679 "Received data for unlinked connection\n"); 673 "Received data for unlinked connection\n");
680 return; 674 return;
@@ -682,8 +676,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
682 if (msg->length > conn->max_buffsize) { 676 if (msg->length > conn->max_buffsize) {
683 iucv_message_reject(conn->path, msg); 677 iucv_message_reject(conn->path, msg);
684 privptr->stats.rx_dropped++; 678 privptr->stats.rx_dropped++;
685 PRINT_WARN("msglen %d > max_buffsize %d\n",
686 msg->length, conn->max_buffsize);
687 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", 679 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
688 msg->length, conn->max_buffsize); 680 msg->length, conn->max_buffsize);
689 return; 681 return;
@@ -695,7 +687,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
695 msg->length, NULL); 687 msg->length, NULL);
696 if (rc || msg->length < 5) { 688 if (rc || msg->length < 5) {
697 privptr->stats.rx_errors++; 689 privptr->stats.rx_errors++;
698 PRINT_WARN("iucv_receive returned %08x\n", rc);
699 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); 690 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
700 return; 691 return;
701 } 692 }
@@ -778,7 +769,6 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
778 fsm_newstate(fi, CONN_STATE_IDLE); 769 fsm_newstate(fi, CONN_STATE_IDLE);
779 if (privptr) 770 if (privptr)
780 privptr->stats.tx_errors += txpackets; 771 privptr->stats.tx_errors += txpackets;
781 PRINT_WARN("iucv_send returned %08x\n", rc);
782 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); 772 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
783 } else { 773 } else {
784 if (privptr) { 774 if (privptr) {
@@ -806,8 +796,6 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
806 path->flags = 0; 796 path->flags = 0;
807 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); 797 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
808 if (rc) { 798 if (rc) {
809 PRINT_WARN("%s: IUCV accept failed with error %d\n",
810 netdev->name, rc);
811 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); 799 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
812 return; 800 return;
813 } 801 }
@@ -873,7 +861,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
873 IUCV_DBF_TEXT(trace, 3, __func__); 861 IUCV_DBF_TEXT(trace, 3, __func__);
874 862
875 fsm_newstate(fi, CONN_STATE_STARTWAIT); 863 fsm_newstate(fi, CONN_STATE_STARTWAIT);
876 PRINT_DEBUG("%s('%s'): connecting ...\n", 864 IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
877 conn->netdev->name, conn->userid); 865 conn->netdev->name, conn->userid);
878 866
879 /* 867 /*
@@ -968,8 +956,8 @@ static void conn_action_inval(fsm_instance *fi, int event, void *arg)
968 struct iucv_connection *conn = arg; 956 struct iucv_connection *conn = arg;
969 struct net_device *netdev = conn->netdev; 957 struct net_device *netdev = conn->netdev;
970 958
971 PRINT_WARN("%s: Cannot connect without username\n", netdev->name); 959 IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
972 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); 960 netdev->name, conn->userid);
973} 961}
974 962
975static const fsm_node conn_fsm[] = { 963static const fsm_node conn_fsm[] = {
@@ -1077,9 +1065,6 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
1077 "connection is up and running\n"); 1065 "connection is up and running\n");
1078 break; 1066 break;
1079 case DEV_STATE_STOPWAIT: 1067 case DEV_STATE_STOPWAIT:
1080 PRINT_INFO(
1081 "%s: got connection UP event during shutdown!\n",
1082 dev->name);
1083 IUCV_DBF_TEXT(data, 2, 1068 IUCV_DBF_TEXT(data, 2,
1084 "dev_action_connup: in DEV_STATE_STOPWAIT\n"); 1069 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1085 break; 1070 break;
@@ -1174,8 +1159,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
1174 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + 1159 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1175 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); 1160 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1176 if (!nskb) { 1161 if (!nskb) {
1177 PRINT_WARN("%s: Could not allocate tx_skb\n",
1178 conn->netdev->name);
1179 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); 1162 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1180 rc = -ENOMEM; 1163 rc = -ENOMEM;
1181 return rc; 1164 return rc;
@@ -1223,7 +1206,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
1223 skb_pull(skb, NETIUCV_HDRLEN); 1206 skb_pull(skb, NETIUCV_HDRLEN);
1224 skb_trim(skb, skb->len - NETIUCV_HDRLEN); 1207 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1225 } 1208 }
1226 PRINT_WARN("iucv_send returned %08x\n", rc);
1227 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); 1209 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1228 } else { 1210 } else {
1229 if (copied) 1211 if (copied)
@@ -1293,14 +1275,11 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1293 * Some sanity checks ... 1275 * Some sanity checks ...
1294 */ 1276 */
1295 if (skb == NULL) { 1277 if (skb == NULL) {
1296 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1297 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); 1278 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1298 privptr->stats.tx_dropped++; 1279 privptr->stats.tx_dropped++;
1299 return 0; 1280 return 0;
1300 } 1281 }
1301 if (skb_headroom(skb) < NETIUCV_HDRLEN) { 1282 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1302 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1303 dev->name, NETIUCV_HDRLEN);
1304 IUCV_DBF_TEXT(data, 2, 1283 IUCV_DBF_TEXT(data, 2,
1305 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); 1284 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1306 dev_kfree_skb(skb); 1285 dev_kfree_skb(skb);
@@ -1393,7 +1372,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1393 1372
1394 IUCV_DBF_TEXT(trace, 3, __func__); 1373 IUCV_DBF_TEXT(trace, 3, __func__);
1395 if (count > 9) { 1374 if (count > 9) {
1396 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1397 IUCV_DBF_TEXT_(setup, 2, 1375 IUCV_DBF_TEXT_(setup, 2,
1398 "%d is length of username\n", (int) count); 1376 "%d is length of username\n", (int) count);
1399 return -EINVAL; 1377 return -EINVAL;
@@ -1409,7 +1387,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1409 /* trailing lf, grr */ 1387 /* trailing lf, grr */
1410 break; 1388 break;
1411 } 1389 }
1412 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1413 IUCV_DBF_TEXT_(setup, 2, 1390 IUCV_DBF_TEXT_(setup, 2,
1414 "username: invalid character %c\n", *p); 1391 "username: invalid character %c\n", *p);
1415 return -EINVAL; 1392 return -EINVAL;
@@ -1421,18 +1398,15 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1421 if (memcmp(username, priv->conn->userid, 9) && 1398 if (memcmp(username, priv->conn->userid, 9) &&
1422 (ndev->flags & (IFF_UP | IFF_RUNNING))) { 1399 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1423 /* username changed while the interface is active. */ 1400 /* username changed while the interface is active. */
1424 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1425 dev->bus_id, priv->conn->userid);
1426 PRINT_WARN("netiucv: user cannot be updated\n");
1427 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); 1401 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1428 return -EBUSY; 1402 return -EPERM;
1429 } 1403 }
1430 read_lock_bh(&iucv_connection_rwlock); 1404 read_lock_bh(&iucv_connection_rwlock);
1431 list_for_each_entry(cp, &iucv_connection_list, list) { 1405 list_for_each_entry(cp, &iucv_connection_list, list) {
1432 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { 1406 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1433 read_unlock_bh(&iucv_connection_rwlock); 1407 read_unlock_bh(&iucv_connection_rwlock);
1434 PRINT_WARN("netiucv: Connection to %s already " 1408 IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
1435 "exists\n", username); 1409 "to %s already exists\n", username);
1436 return -EEXIST; 1410 return -EEXIST;
1437 } 1411 }
1438 } 1412 }
@@ -1466,13 +1440,10 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1466 bs1 = simple_strtoul(buf, &e, 0); 1440 bs1 = simple_strtoul(buf, &e, 0);
1467 1441
1468 if (e && (!isspace(*e))) { 1442 if (e && (!isspace(*e))) {
1469 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1470 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); 1443 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1471 return -EINVAL; 1444 return -EINVAL;
1472 } 1445 }
1473 if (bs1 > NETIUCV_BUFSIZE_MAX) { 1446 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1474 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1475 bs1);
1476 IUCV_DBF_TEXT_(setup, 2, 1447 IUCV_DBF_TEXT_(setup, 2,
1477 "buffer_write: buffer size %d too large\n", 1448 "buffer_write: buffer size %d too large\n",
1478 bs1); 1449 bs1);
@@ -1480,16 +1451,12 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1480 } 1451 }
1481 if ((ndev->flags & IFF_RUNNING) && 1452 if ((ndev->flags & IFF_RUNNING) &&
1482 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { 1453 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1483 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1484 bs1);
1485 IUCV_DBF_TEXT_(setup, 2, 1454 IUCV_DBF_TEXT_(setup, 2,
1486 "buffer_write: buffer size %d too small\n", 1455 "buffer_write: buffer size %d too small\n",
1487 bs1); 1456 bs1);
1488 return -EINVAL; 1457 return -EINVAL;
1489 } 1458 }
1490 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { 1459 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1491 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1492 bs1);
1493 IUCV_DBF_TEXT_(setup, 2, 1460 IUCV_DBF_TEXT_(setup, 2,
1494 "buffer_write: buffer size %d too small\n", 1461 "buffer_write: buffer size %d too small\n",
1495 bs1); 1462 bs1);
@@ -1963,7 +1930,6 @@ static ssize_t conn_write(struct device_driver *drv,
1963 1930
1964 IUCV_DBF_TEXT(trace, 3, __func__); 1931 IUCV_DBF_TEXT(trace, 3, __func__);
1965 if (count>9) { 1932 if (count>9) {
1966 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1967 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); 1933 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1968 return -EINVAL; 1934 return -EINVAL;
1969 } 1935 }
@@ -1976,7 +1942,6 @@ static ssize_t conn_write(struct device_driver *drv,
1976 if (*p == '\n') 1942 if (*p == '\n')
1977 /* trailing lf, grr */ 1943 /* trailing lf, grr */
1978 break; 1944 break;
1979 PRINT_WARN("netiucv: Invalid character in username!\n");
1980 IUCV_DBF_TEXT_(setup, 2, 1945 IUCV_DBF_TEXT_(setup, 2,
1981 "conn_write: invalid character %c\n", *p); 1946 "conn_write: invalid character %c\n", *p);
1982 return -EINVAL; 1947 return -EINVAL;
@@ -1989,8 +1954,8 @@ static ssize_t conn_write(struct device_driver *drv,
1989 list_for_each_entry(cp, &iucv_connection_list, list) { 1954 list_for_each_entry(cp, &iucv_connection_list, list) {
1990 if (!strncmp(username, cp->userid, 9)) { 1955 if (!strncmp(username, cp->userid, 9)) {
1991 read_unlock_bh(&iucv_connection_rwlock); 1956 read_unlock_bh(&iucv_connection_rwlock);
1992 PRINT_WARN("netiucv: Connection to %s already " 1957 IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
1993 "exists\n", username); 1958 "to %s already exists\n", username);
1994 return -EEXIST; 1959 return -EEXIST;
1995 } 1960 }
1996 } 1961 }
@@ -1998,9 +1963,6 @@ static ssize_t conn_write(struct device_driver *drv,
1998 1963
1999 dev = netiucv_init_netdevice(username); 1964 dev = netiucv_init_netdevice(username);
2000 if (!dev) { 1965 if (!dev) {
2001 PRINT_WARN("netiucv: Could not allocate network device "
2002 "structure for user '%s'\n",
2003 netiucv_printname(username));
2004 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); 1966 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2005 return -ENODEV; 1967 return -ENODEV;
2006 } 1968 }
@@ -2020,15 +1982,12 @@ static ssize_t conn_write(struct device_driver *drv,
2020 if (rc) 1982 if (rc)
2021 goto out_unreg; 1983 goto out_unreg;
2022 1984
2023 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2024 1985
2025 return count; 1986 return count;
2026 1987
2027out_unreg: 1988out_unreg:
2028 netiucv_unregister_device(priv->dev); 1989 netiucv_unregister_device(priv->dev);
2029out_free_ndev: 1990out_free_ndev:
2030 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2031 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2032 netiucv_free_netdevice(dev); 1991 netiucv_free_netdevice(dev);
2033 return rc; 1992 return rc;
2034} 1993}
@@ -2073,14 +2032,13 @@ static ssize_t remove_write (struct device_driver *drv,
2073 PRINT_WARN("netiucv: %s cannot be removed\n", 2032 PRINT_WARN("netiucv: %s cannot be removed\n",
2074 ndev->name); 2033 ndev->name);
2075 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); 2034 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2076 return -EBUSY; 2035 return -EPERM;
2077 } 2036 }
2078 unregister_netdev(ndev); 2037 unregister_netdev(ndev);
2079 netiucv_unregister_device(dev); 2038 netiucv_unregister_device(dev);
2080 return count; 2039 return count;
2081 } 2040 }
2082 read_unlock_bh(&iucv_connection_rwlock); 2041 read_unlock_bh(&iucv_connection_rwlock);
2083 PRINT_WARN("netiucv: net device %s unknown\n", name);
2084 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); 2042 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2085 return -EINVAL; 2043 return -EINVAL;
2086} 2044}
@@ -2148,7 +2106,6 @@ static int __init netiucv_init(void)
2148 netiucv_driver.groups = netiucv_drv_attr_groups; 2106 netiucv_driver.groups = netiucv_drv_attr_groups;
2149 rc = driver_register(&netiucv_driver); 2107 rc = driver_register(&netiucv_driver);
2150 if (rc) { 2108 if (rc) {
2151 PRINT_ERR("NETIUCV: failed to register driver.\n");
2152 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); 2109 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2153 goto out_iucv; 2110 goto out_iucv;
2154 } 2111 }
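
Note on the netiucv.c hunks: the console PRINT_WARN/PRINT_INFO chatter is
dropped in favor of the existing IUCV_DBF_* entries, which land in an s390
debug feature rather than the kernel log, and sysfs writes on an active
device now return -EPERM instead of -EBUSY. A minimal sketch of the
underlying s390 debug-feature API, assuming the standard <asm/debug.h>
interface; the names here are illustrative, not from the patch:

        #include <asm/debug.h>

        static debug_info_t *demo_dbf;

        static int __init demo_dbf_init(void)
        {
                /* 2 pages per area, 1 area, 128-byte entries */
                demo_dbf = debug_register("netiucv_demo", 2, 1, 128);
                if (!demo_dbf)
                        return -ENOMEM;
                debug_register_view(demo_dbf, &debug_sprintf_view);
                debug_set_level(demo_dbf, 2);
                /* roughly what IUCV_DBF_TEXT_(setup, 2, ...) expands to: */
                debug_sprintf_event(demo_dbf, 2, "rc %d from iucv_send\n", 0);
                return 0;
        }
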
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 9a71dae223e8..0ac54dc638c2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -420,7 +420,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
420 QETH_DBF_TEXT(TRACE, 3, "urla"); 420 QETH_DBF_TEXT(TRACE, 3, "urla");
421 break; 421 break;
422 default: 422 default:
423 PRINT_WARN("Received data is IPA " 423 QETH_DBF_MESSAGE(2, "Received data is IPA "
424 "but not a reply!\n"); 424 "but not a reply!\n");
425 break; 425 break;
426 } 426 }
@@ -735,8 +735,8 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
735 char *sense; 735 char *sense;
736 736
737 sense = (char *) irb->ecw; 737 sense = (char *) irb->ecw;
738 cstat = irb->scsw.cstat; 738 cstat = irb->scsw.cmd.cstat;
739 dstat = irb->scsw.dstat; 739 dstat = irb->scsw.cmd.dstat;
740 740
741 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 741 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
742 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 742 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
@@ -823,8 +823,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
823 823
824 if (__qeth_check_irb_error(cdev, intparm, irb)) 824 if (__qeth_check_irb_error(cdev, intparm, irb))
825 return; 825 return;
826 cstat = irb->scsw.cstat; 826 cstat = irb->scsw.cmd.cstat;
827 dstat = irb->scsw.dstat; 827 dstat = irb->scsw.cmd.dstat;
828 828
829 card = CARD_FROM_CDEV(cdev); 829 card = CARD_FROM_CDEV(cdev);
830 if (!card) 830 if (!card)
@@ -842,10 +842,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
842 } 842 }
843 atomic_set(&channel->irq_pending, 0); 843 atomic_set(&channel->irq_pending, 0);
844 844
845 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC)) 845 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
846 channel->state = CH_STATE_STOPPED; 846 channel->state = CH_STATE_STOPPED;
847 847
848 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC)) 848 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
849 channel->state = CH_STATE_HALTED; 849 channel->state = CH_STATE_HALTED;
850 850
851 /*let's wake up immediately on data channel*/ 851 /*let's wake up immediately on data channel*/
@@ -4092,7 +4092,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4092 4092
4093 rc = qeth_determine_card_type(card); 4093 rc = qeth_determine_card_type(card);
4094 if (rc) { 4094 if (rc) {
4095 PRINT_WARN("%s: not a valid card type\n", __func__);
4096 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 4095 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4097 goto err_card; 4096 goto err_card;
4098 } 4097 }
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 999552c83bbe..06deaee50f6d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -944,15 +944,8 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
944 else 944 else
945 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, 945 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
946 addr->del_flags); 946 addr->del_flags);
947 if (rc) { 947 if (rc)
948 QETH_DBF_TEXT(TRACE, 2, "failed"); 948 QETH_DBF_TEXT(TRACE, 2, "failed");
949 /* TODO: re-activate this warning as soon as we have a
950 * clean mirco code
951 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
952 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
953 buf, rc);
954 */
955 }
956 949
957 return rc; 950 return rc;
958} 951}
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 8735a415a116..164e090c2625 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -156,11 +156,8 @@ static int __init smsg_init(void)
156 if (rc != 0) 156 if (rc != 0)
157 goto out; 157 goto out;
158 rc = iucv_register(&smsg_handler, 1); 158 rc = iucv_register(&smsg_handler, 1);
159 if (rc) { 159 if (rc)
160 printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
161 rc = -EIO; /* better errno ? */
162 goto out_driver; 160 goto out_driver;
163 }
164 smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); 161 smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
165 if (!smsg_path) { 162 if (!smsg_path) {
166 rc = -ENOMEM; 163 rc = -ENOMEM;
@@ -168,11 +165,8 @@ static int __init smsg_init(void)
168 } 165 }
169 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", 166 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
170 NULL, NULL, NULL); 167 NULL, NULL, NULL);
171 if (rc) { 168 if (rc)
172 printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
173 rc = -EIO; /* better errno ? */
174 goto out_free; 169 goto out_free;
175 }
176 cpcmd("SET SMSG IUCV", NULL, 0, NULL); 170 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
177 return 0; 171 return 0;
178 172
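
Note on the smsgiucv.c hunks: the init path no longer overwrites the
return codes from iucv_register() and iucv_path_connect() with a blanket
-EIO; the IUCV error is propagated through the existing unwind labels.
Condensed shape of the resulting error ladder, as visible above:

        rc = iucv_register(&smsg_handler, 1);
        if (rc)
                goto out_driver;        /* propagate IUCV's own errno */
        ...
        rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
                               NULL, NULL, NULL);
        if (rc)
                goto out_free;
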
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5bfbe7659830..834e9ee7e934 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -2,10 +2,10 @@
2 * drivers/s390/s390mach.c 2 * drivers/s390/s390mach.c
3 * S/390 machine check handler 3 * S/390 machine check handler
4 * 4 *
5 * S390 version 5 * Copyright IBM Corp. 2000,2008
6 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
@@ -18,10 +18,6 @@
18#include <asm/etr.h> 18#include <asm/etr.h>
19#include <asm/lowcore.h> 19#include <asm/lowcore.h>
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include "cio/cio.h"
22#include "cio/chsc.h"
23#include "cio/css.h"
24#include "cio/chp.h"
25#include "s390mach.h" 21#include "s390mach.h"
26 22
27static struct semaphore m_sem; 23static struct semaphore m_sem;
@@ -36,13 +32,40 @@ s390_handle_damage(char *msg)
36 for(;;); 32 for(;;);
37} 33}
38 34
35static crw_handler_t crw_handlers[NR_RSCS];
36
37/**
38 * s390_register_crw_handler() - register a channel report word handler
39 * @rsc: reporting source code to handle
40 * @handler: handler to be registered
41 *
42 * Returns %0 on success and a negative error value otherwise.
43 */
44int s390_register_crw_handler(int rsc, crw_handler_t handler)
45{
46 if ((rsc < 0) || (rsc >= NR_RSCS))
47 return -EINVAL;
48 if (!cmpxchg(&crw_handlers[rsc], NULL, handler))
49 return 0;
50 return -EBUSY;
51}
52
53/**
54 * s390_unregister_crw_handler() - unregister a channel report word handler
55 * @rsc: reporting source code to handle
56 */
57void s390_unregister_crw_handler(int rsc)
58{
59 if ((rsc < 0) || (rsc >= NR_RSCS))
60 return;
61 xchg(&crw_handlers[rsc], NULL);
62 synchronize_sched();
63}
64
39/* 65/*
40 * Retrieve CRWs and call function to handle event. 66 * Retrieve CRWs and call function to handle event.
41 *
42 * Note : we currently process CRWs for io and chsc subchannels only
43 */ 67 */
44static int 68static int s390_collect_crw_info(void *param)
45s390_collect_crw_info(void *param)
46{ 69{
47 struct crw crw[2]; 70 struct crw crw[2];
48 int ccode; 71 int ccode;
@@ -84,57 +107,24 @@ repeat:
84 crw[chain].rsid); 107 crw[chain].rsid);
85 /* Check for overflows. */ 108 /* Check for overflows. */
86 if (crw[chain].oflw) { 109 if (crw[chain].oflw) {
110 int i;
111
87 pr_debug("%s: crw overflow detected!\n", __func__); 112 pr_debug("%s: crw overflow detected!\n", __func__);
88 css_schedule_eval_all(); 113 for (i = 0; i < NR_RSCS; i++) {
114 if (crw_handlers[i])
115 crw_handlers[i](NULL, NULL, 1);
116 }
89 chain = 0; 117 chain = 0;
90 continue; 118 continue;
91 } 119 }
92 switch (crw[chain].rsc) { 120 if (crw[0].chn && !chain) {
93 case CRW_RSC_SCH: 121 chain++;
94 if (crw[0].chn && !chain) 122 continue;
95 break;
96 pr_debug("source is subchannel %04X\n", crw[0].rsid);
97 css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
98 break;
99 case CRW_RSC_MONITOR:
100 pr_debug("source is monitoring facility\n");
101 break;
102 case CRW_RSC_CPATH:
103 pr_debug("source is channel path %02X\n", crw[0].rsid);
104 /*
105 * Check for solicited machine checks. These are
106 * created by reset channel path and need not be
107 * reported to the common I/O layer.
108 */
109 if (crw[chain].slct) {
110 pr_debug("solicited machine check for "
111 "channel path %02X\n", crw[0].rsid);
112 break;
113 }
114 switch (crw[0].erc) {
115 case CRW_ERC_IPARM: /* Path has come. */
116 chp_process_crw(crw[0].rsid, 1);
117 break;
118 case CRW_ERC_PERRI: /* Path has gone. */
119 case CRW_ERC_PERRN:
120 chp_process_crw(crw[0].rsid, 0);
121 break;
122 default:
123 pr_debug("Don't know how to handle erc=%x\n",
124 crw[0].erc);
125 }
126 break;
127 case CRW_RSC_CONFIG:
128 pr_debug("source is configuration-alert facility\n");
129 break;
130 case CRW_RSC_CSS:
131 pr_debug("source is channel subsystem\n");
132 chsc_process_crw();
133 break;
134 default:
135 pr_debug("unknown source\n");
136 break;
137 } 123 }
124 if (crw_handlers[crw[chain].rsc])
125 crw_handlers[crw[chain].rsc](&crw[0],
126 chain ? &crw[1] : NULL,
127 0);
138 /* chain is always 0 or 1 here. */ 128 /* chain is always 0 or 1 here. */
139 chain = crw[chain].chn ? chain + 1 : 0; 129 chain = crw[chain].chn ? chain + 1 : 0;
140 } 130 }
@@ -468,6 +458,10 @@ s390_do_machine_check(struct pt_regs *regs)
468 etr_sync_check(); 458 etr_sync_check();
469 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) 459 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
470 etr_switch_to_local(); 460 etr_switch_to_local();
461 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
462 stp_sync_check();
463 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
464 stp_island_check();
471 } 465 }
472 466
473 if (mci->se) 467 if (mci->se)
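
Note on the s390mach.c hunks: the hard-coded CRW switch is replaced by a
16-entry handler table indexed by reporting-source code. Registration uses
cmpxchg() so at most one handler owns a slot without taking a lock, and
unregistration clears the slot with xchg() and then waits in
synchronize_sched() so no handler can still be running when it returns. A
usage sketch for the new interface; the handler body is illustrative:

        static void demo_css_crw_handler(struct crw *crw0, struct crw *crw1,
                                         int overflow)
        {
                if (overflow) {
                        /* CRWs were lost; re-evaluate all state. */
                        return;
                }
                pr_debug("crw: rsc=%x rsid=%x chained=%d\n",
                         crw0->rsc, crw0->rsid, crw1 != NULL);
        }

        static int __init demo_init(void)
        {
                /* -EBUSY if another handler already owns CRW_RSC_CSS */
                return s390_register_crw_handler(CRW_RSC_CSS,
                                                 demo_css_crw_handler);
        }
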
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index ca681f9b67fc..d39f8b697d27 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -72,6 +72,13 @@ struct crw {
72 __u32 rsid : 16; /* reporting-source ID */ 72 __u32 rsid : 16; /* reporting-source ID */
73} __attribute__ ((packed)); 73} __attribute__ ((packed));
74 74
75typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
76
77extern int s390_register_crw_handler(int rsc, crw_handler_t handler);
78extern void s390_unregister_crw_handler(int rsc);
79
80#define NR_RSCS 16
81
75#define CRW_RSC_MONITOR 0x2 /* monitoring facility */ 82#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
76#define CRW_RSC_SCH 0x3 /* subchannel */ 83#define CRW_RSC_SCH 0x3 /* subchannel */
77#define CRW_RSC_CPATH 0x4 /* channel path */ 84#define CRW_RSC_CPATH 0x4 /* channel path */
@@ -105,6 +112,9 @@ static inline int stcrw(struct crw *pcrw )
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */ 112#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ 113#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107 114
115#define ED_STP_SYNC 7 /* External damage STP sync check */
116#define ED_STP_ISLAND 6 /* External damage STP island check */
117
108struct pt_regs; 118struct pt_regs;
109 119
110void s390_handle_mcck(void); 120void s390_handle_mcck(void);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cee1e4c6d83c..fccd2e88d600 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -183,8 +183,9 @@ static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
183 int tablesize); 183 int tablesize);
184static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, 184static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
185 Sg_request * srp); 185 Sg_request * srp);
186static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, 186static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
187 int blocking, int read_only, Sg_request ** o_srp); 187 const char __user *buf, size_t count, int blocking,
188 int read_only, Sg_request **o_srp);
188static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 189static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
189 unsigned char *cmnd, int timeout, int blocking); 190 unsigned char *cmnd, int timeout, int blocking);
190static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 191static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
@@ -205,7 +206,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
205static Sg_request *sg_add_request(Sg_fd * sfp); 206static Sg_request *sg_add_request(Sg_fd * sfp);
206static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 207static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
207static int sg_res_in_use(Sg_fd * sfp); 208static int sg_res_in_use(Sg_fd * sfp);
208static int sg_allow_access(unsigned char opcode, char dev_type);
209static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len); 209static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
210static Sg_device *sg_get_dev(int dev); 210static Sg_device *sg_get_dev(int dev);
211#ifdef CONFIG_SCSI_PROC_FS 211#ifdef CONFIG_SCSI_PROC_FS
@@ -554,7 +554,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
554 return -EFAULT; 554 return -EFAULT;
555 blocking = !(filp->f_flags & O_NONBLOCK); 555 blocking = !(filp->f_flags & O_NONBLOCK);
556 if (old_hdr.reply_len < 0) 556 if (old_hdr.reply_len < 0)
557 return sg_new_write(sfp, buf, count, blocking, 0, NULL); 557 return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
558 if (count < (SZ_SG_HEADER + 6)) 558 if (count < (SZ_SG_HEADER + 6))
559 return -EIO; /* The minimum scsi command length is 6 bytes. */ 559 return -EIO; /* The minimum scsi command length is 6 bytes. */
560 560
@@ -631,8 +631,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
631} 631}
632 632
633static ssize_t 633static ssize_t
634sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, 634sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
635 int blocking, int read_only, Sg_request ** o_srp) 635 size_t count, int blocking, int read_only,
636 Sg_request **o_srp)
636{ 637{
637 int k; 638 int k;
638 Sg_request *srp; 639 Sg_request *srp;
@@ -688,8 +689,7 @@ sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
688 sg_remove_request(sfp, srp); 689 sg_remove_request(sfp, srp);
689 return -EFAULT; 690 return -EFAULT;
690 } 691 }
691 if (read_only && 692 if (read_only && !blk_verify_command(file, cmnd)) {
692 (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
693 sg_remove_request(sfp, srp); 693 sg_remove_request(sfp, srp);
694 return -EPERM; 694 return -EPERM;
695 } 695 }
@@ -809,7 +809,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
809 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) 809 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
810 return -EFAULT; 810 return -EFAULT;
811 result = 811 result =
812 sg_new_write(sfp, p, SZ_SG_IO_HDR, 812 sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
813 blocking, read_only, &srp); 813 blocking, read_only, &srp);
814 if (result < 0) 814 if (result < 0)
815 return result; 815 return result;
@@ -1058,7 +1058,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1058 1058
1059 if (copy_from_user(&opcode, siocp->data, 1)) 1059 if (copy_from_user(&opcode, siocp->data, 1))
1060 return -EFAULT; 1060 return -EFAULT;
1061 if (!sg_allow_access(opcode, sdp->device->type)) 1061 if (!blk_verify_command(filp, &opcode))
1062 return -EPERM; 1062 return -EPERM;
1063 } 1063 }
1064 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); 1064 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
@@ -2512,30 +2512,6 @@ sg_page_free(struct page *page, int size)
2512 __free_pages(page, order); 2512 __free_pages(page, order);
2513} 2513}
2514 2514
2515#ifndef MAINTENANCE_IN_CMD
2516#define MAINTENANCE_IN_CMD 0xa3
2517#endif
2518
2519static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2520 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2521 READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2522 SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2523};
2524
2525static int
2526sg_allow_access(unsigned char opcode, char dev_type)
2527{
2528 int k;
2529
2530 if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */
2531 return 1;
2532 for (k = 0; k < sizeof (allow_ops); ++k) {
2533 if (opcode == allow_ops[k])
2534 return 1;
2535 }
2536 return 0;
2537}
2538
2539#ifdef CONFIG_SCSI_PROC_FS 2515#ifdef CONFIG_SCSI_PROC_FS
2540static int 2516static int
2541sg_idr_max_id(int id, void *p, void *data) 2517sg_idr_max_id(int id, void *p, void *data)
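
Note on the sg.c hunks: the private sg_allow_access() opcode whitelist is
dropped in favor of the block layer's command filter, blk_verify_command(),
which here takes the struct file, apparently so the opener's mode can be
considered. Per the call sites above, a zero return means the command is
not permitted on a read-only descriptor:

        if (read_only && !blk_verify_command(file, cmnd)) {
                sg_remove_request(sfp, srp);
                return -EPERM;     /* opcode filtered for read-only opens */
        }
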
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index c82df8bd4d89..27f5bfd1def3 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -673,24 +673,20 @@ fail:
673static void get_sectorsize(struct scsi_cd *cd) 673static void get_sectorsize(struct scsi_cd *cd)
674{ 674{
675 unsigned char cmd[10]; 675 unsigned char cmd[10];
676 unsigned char *buffer; 676 unsigned char buffer[8];
677 int the_result, retries = 3; 677 int the_result, retries = 3;
678 int sector_size; 678 int sector_size;
679 struct request_queue *queue; 679 struct request_queue *queue;
680 680
681 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
682 if (!buffer)
683 goto Enomem;
684
685 do { 681 do {
686 cmd[0] = READ_CAPACITY; 682 cmd[0] = READ_CAPACITY;
687 memset((void *) &cmd[1], 0, 9); 683 memset((void *) &cmd[1], 0, 9);
688 memset(buffer, 0, 8); 684 memset(buffer, 0, sizeof(buffer));
689 685
690 /* Do the command and wait.. */ 686 /* Do the command and wait.. */
691 the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE, 687 the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
692 buffer, 8, NULL, SR_TIMEOUT, 688 buffer, sizeof(buffer), NULL,
693 MAX_RETRIES); 689 SR_TIMEOUT, MAX_RETRIES);
694 690
695 retries--; 691 retries--;
696 692
@@ -745,14 +741,8 @@ static void get_sectorsize(struct scsi_cd *cd)
745 741
746 queue = cd->device->request_queue; 742 queue = cd->device->request_queue;
747 blk_queue_hardsect_size(queue, sector_size); 743 blk_queue_hardsect_size(queue, sector_size);
748out:
749 kfree(buffer);
750 return;
751 744
752Enomem: 745 return;
753 cd->capacity = 0x1fffff;
754 cd->device->sector_size = 2048; /* A guess, just in case */
755 goto out;
756} 746}
757 747
758static void get_capabilities(struct scsi_cd *cd) 748static void get_capabilities(struct scsi_cd *cd)
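
Note on the sr.c hunks: get_sectorsize() previously kmalloc'ed a 512-byte
GFP_DMA buffer for a reply that is only ever 8 bytes long, and needed an
out-of-memory fallback path. READ CAPACITY(10) returns exactly two
big-endian 32-bit words, so an 8-byte on-stack buffer suffices and the
Enomem label goes away. A parsing sketch; the struct name is illustrative:

        struct read_capacity_10_data {
                __be32 last_lba;    /* address of the last logical block */
                __be32 block_len;   /* logical block length in bytes */
        } __attribute__((packed));

        const struct read_capacity_10_data *rc = (const void *)buffer;
        unsigned long capacity = be32_to_cpu(rc->last_lba) + 1;
        int sector_size = be32_to_cpu(rc->block_len);
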
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 42be8b01a40f..6aeef22bd203 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1439,14 +1439,29 @@ static struct uart_driver atmel_uart = {
1439}; 1439};
1440 1440
1441#ifdef CONFIG_PM 1441#ifdef CONFIG_PM
1442static bool atmel_serial_clk_will_stop(void)
1443{
1444#ifdef CONFIG_ARCH_AT91
1445 return at91_suspend_entering_slow_clock();
1446#else
1447 return false;
1448#endif
1449}
1450
1442static int atmel_serial_suspend(struct platform_device *pdev, 1451static int atmel_serial_suspend(struct platform_device *pdev,
1443 pm_message_t state) 1452 pm_message_t state)
1444{ 1453{
1445 struct uart_port *port = platform_get_drvdata(pdev); 1454 struct uart_port *port = platform_get_drvdata(pdev);
1446 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1455 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1447 1456
1457 if (atmel_is_console_port(port) && console_suspend_enabled) {
1458 /* Drain the TX shifter */
1459 while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
1460 cpu_relax();
1461 }
1462
1448 if (device_may_wakeup(&pdev->dev) 1463 if (device_may_wakeup(&pdev->dev)
1449 && !at91_suspend_entering_slow_clock()) 1464 && !atmel_serial_clk_will_stop())
1450 enable_irq_wake(port->irq); 1465 enable_irq_wake(port->irq);
1451 else { 1466 else {
1452 uart_suspend_port(&atmel_uart, port); 1467 uart_suspend_port(&atmel_uart, port);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e0c5f96b273d..9b887ef64ff1 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -7,7 +7,7 @@ menu "Graphics support"
7 7
8source "drivers/char/agp/Kconfig" 8source "drivers/char/agp/Kconfig"
9 9
10source "drivers/char/drm/Kconfig" 10source "drivers/gpu/drm/Kconfig"
11 11
12config VGASTATE 12config VGASTATE
13 tristate 13 tristate
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index 4fb16240c04d..f5252c2552fd 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -21,8 +21,7 @@
21 21
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/mtrr.h> 23#include <asm/mtrr.h>
24 24#include <asm/visws/sgivw.h>
25#include <setup_arch.h>
26 25
27#define INCLUDE_TIMING_TABLE_DATA 26#define INCLUDE_TIMING_TABLE_DATA
28#define DBE_REG_BASE par->regs 27#define DBE_REG_BASE par->regs
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 619a6f8d65a2..47ed39b52f9c 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -18,6 +18,7 @@
18 * frame buffer. 18 * frame buffer.
19 */ 19 */
20 20
21#include <linux/console.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/errno.h> 23#include <linux/errno.h>
23#include <linux/fb.h> 24#include <linux/fb.h>
@@ -42,37 +43,68 @@ struct xenfb_info {
42 struct xenfb_page *page; 43 struct xenfb_page *page;
43 unsigned long *mfns; 44 unsigned long *mfns;
44 int update_wanted; /* XENFB_TYPE_UPDATE wanted */ 45 int update_wanted; /* XENFB_TYPE_UPDATE wanted */
46 int feature_resize; /* XENFB_TYPE_RESIZE ok */
47 struct xenfb_resize resize; /* protected by resize_lock */
48 int resize_dpy; /* ditto */
49 spinlock_t resize_lock;
45 50
46 struct xenbus_device *xbdev; 51 struct xenbus_device *xbdev;
47}; 52};
48 53
49static u32 xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8; 54#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
50 55
56enum { KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT };
57static int video[KPARAM_CNT] = { 2, XENFB_WIDTH, XENFB_HEIGHT };
58module_param_array(video, int, NULL, 0);
59MODULE_PARM_DESC(video,
60 "Video memory size in MB, width, height in pixels (default 2,800,600)");
61
62static void xenfb_make_preferred_console(void);
51static int xenfb_remove(struct xenbus_device *); 63static int xenfb_remove(struct xenbus_device *);
52static void xenfb_init_shared_page(struct xenfb_info *); 64static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
53static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *); 65static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
54static void xenfb_disconnect_backend(struct xenfb_info *); 66static void xenfb_disconnect_backend(struct xenfb_info *);
55 67
68static void xenfb_send_event(struct xenfb_info *info,
69 union xenfb_out_event *event)
70{
71 u32 prod;
72
73 prod = info->page->out_prod;
74 /* caller ensures !xenfb_queue_full() */
75 mb(); /* ensure ring space available */
76 XENFB_OUT_RING_REF(info->page, prod) = *event;
77 wmb(); /* ensure ring contents visible */
78 info->page->out_prod = prod + 1;
79
80 notify_remote_via_irq(info->irq);
81}
82
56static void xenfb_do_update(struct xenfb_info *info, 83static void xenfb_do_update(struct xenfb_info *info,
57 int x, int y, int w, int h) 84 int x, int y, int w, int h)
58{ 85{
59 union xenfb_out_event event; 86 union xenfb_out_event event;
60 u32 prod;
61 87
88 memset(&event, 0, sizeof(event));
62 event.type = XENFB_TYPE_UPDATE; 89 event.type = XENFB_TYPE_UPDATE;
63 event.update.x = x; 90 event.update.x = x;
64 event.update.y = y; 91 event.update.y = y;
65 event.update.width = w; 92 event.update.width = w;
66 event.update.height = h; 93 event.update.height = h;
67 94
68 prod = info->page->out_prod;
69 /* caller ensures !xenfb_queue_full() */ 95 /* caller ensures !xenfb_queue_full() */
70 mb(); /* ensure ring space available */ 96 xenfb_send_event(info, &event);
71 XENFB_OUT_RING_REF(info->page, prod) = event; 97}
72 wmb(); /* ensure ring contents visible */
73 info->page->out_prod = prod + 1;
74 98
75 notify_remote_via_irq(info->irq); 99static void xenfb_do_resize(struct xenfb_info *info)
100{
101 union xenfb_out_event event;
102
103 memset(&event, 0, sizeof(event));
104 event.resize = info->resize;
105
106 /* caller ensures !xenfb_queue_full() */
107 xenfb_send_event(info, &event);
76} 108}
77 109
78static int xenfb_queue_full(struct xenfb_info *info) 110static int xenfb_queue_full(struct xenfb_info *info)
@@ -84,12 +116,28 @@ static int xenfb_queue_full(struct xenfb_info *info)
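
Note on the xen-fbfront.c hunks above: the ring-producer code is hoisted
out of xenfb_do_update() into xenfb_send_event() so the new resize event
can share it. The barrier pattern matters: the slot is written only after
the space check (mb()), and the producer index is published only after the
slot contents are globally visible (wmb()). Generic shape of the same
protocol, with hypothetical ring names standing in for the XENFB macros:

        u32 prod = ring->out_prod;
        /* caller has checked !queue_full(), so this slot is free */
        mb();                       /* space check before slot write */
        ring->slot[prod % RING_LEN] = *event;
        wmb();                      /* slot visible before index update */
        ring->out_prod = prod + 1;
        notify_remote_via_irq(irq);
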
84 return prod - cons == XENFB_OUT_RING_LEN; 116 return prod - cons == XENFB_OUT_RING_LEN;
85} 117}
86 118
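A worked example of why the full test stays correct across 32-bit wraparound: if prod has wrapped to 3 while cons is still 0xfffffffe, then prod - cons in u32 arithmetic is 5, the true number of outstanding events, because the subtraction is performed mod 2^32. The indices themselves run free and are only wrapped into range when a slot is actually addressed.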
119static void xenfb_handle_resize_dpy(struct xenfb_info *info)
120{
121 unsigned long flags;
122
123 spin_lock_irqsave(&info->resize_lock, flags);
124 if (info->resize_dpy) {
125 if (!xenfb_queue_full(info)) {
126 info->resize_dpy = 0;
127 xenfb_do_resize(info);
128 }
129 }
130 spin_unlock_irqrestore(&info->resize_lock, flags);
131}
132
87static void xenfb_refresh(struct xenfb_info *info, 133static void xenfb_refresh(struct xenfb_info *info,
88 int x1, int y1, int w, int h) 134 int x1, int y1, int w, int h)
89{ 135{
90 unsigned long flags; 136 unsigned long flags;
91 int y2 = y1 + h - 1;
92 int x2 = x1 + w - 1; 137 int x2 = x1 + w - 1;
138 int y2 = y1 + h - 1;
139
140 xenfb_handle_resize_dpy(info);
93 141
94 if (!info->update_wanted) 142 if (!info->update_wanted)
95 return; 143 return;
@@ -222,6 +270,57 @@ static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
222 return res; 270 return res;
223} 271}
224 272
273static int
274xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
275{
276 struct xenfb_info *xenfb_info;
277 int required_mem_len;
278
279 xenfb_info = info->par;
280
281 if (!xenfb_info->feature_resize) {
282 if (var->xres == video[KPARAM_WIDTH] &&
283 var->yres == video[KPARAM_HEIGHT] &&
284 var->bits_per_pixel == xenfb_info->page->depth) {
285 return 0;
286 }
287 return -EINVAL;
288 }
289
290 /* Can't resize past initial width and height */
291 if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
292 return -EINVAL;
293
294 required_mem_len = var->xres * var->yres * xenfb_info->page->depth / 8;
295 if (var->bits_per_pixel == xenfb_info->page->depth &&
296 var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
297 required_mem_len <= info->fix.smem_len) {
298 var->xres_virtual = var->xres;
299 var->yres_virtual = var->yres;
300 return 0;
301 }
302 return -EINVAL;
303}
304
305static int xenfb_set_par(struct fb_info *info)
306{
307 struct xenfb_info *xenfb_info;
308 unsigned long flags;
309
310 xenfb_info = info->par;
311
312 spin_lock_irqsave(&xenfb_info->resize_lock, flags);
313 xenfb_info->resize.type = XENFB_TYPE_RESIZE;
314 xenfb_info->resize.width = info->var.xres;
315 xenfb_info->resize.height = info->var.yres;
316 xenfb_info->resize.stride = info->fix.line_length;
317 xenfb_info->resize.depth = info->var.bits_per_pixel;
318 xenfb_info->resize.offset = 0;
319 xenfb_info->resize_dpy = 1;
320 spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
321 return 0;
322}
323
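The new fb_check_var/fb_set_par pair is what lets userspace drive the resize protocol: FBIOPUT_VSCREENINFO validates the request through xenfb_check_var(), and activation lands in xenfb_set_par(), which queues the XENFB_TYPE_RESIZE event that the next refresh flushes. A sketch of a client exercising that path (device node and mode values are illustrative):

/* fbresize.c - hedged sketch: ask an fb driver for a new mode */
#include <fcntl.h>
#include <linux/fb.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    struct fb_var_screeninfo var;
    int fd = open("/dev/fb0", O_RDWR);     /* assumes xenfb is fb0 */

    if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0) {
        perror("fb");
        return 1;
    }
    var.xres = var.xres_virtual = 640;     /* must not exceed initial size */
    var.yres = var.yres_virtual = 480;
    /* The kernel runs fb_check_var, then fb_set_par on activation */
    if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) < 0)
        perror("FBIOPUT_VSCREENINFO");     /* -EINVAL if check_var rejects */
    close(fd);
    return 0;
}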
225static struct fb_ops xenfb_fb_ops = { 324static struct fb_ops xenfb_fb_ops = {
226 .owner = THIS_MODULE, 325 .owner = THIS_MODULE,
227 .fb_read = fb_sys_read, 326 .fb_read = fb_sys_read,
@@ -230,6 +329,8 @@ static struct fb_ops xenfb_fb_ops = {
230 .fb_fillrect = xenfb_fillrect, 329 .fb_fillrect = xenfb_fillrect,
231 .fb_copyarea = xenfb_copyarea, 330 .fb_copyarea = xenfb_copyarea,
232 .fb_imageblit = xenfb_imageblit, 331 .fb_imageblit = xenfb_imageblit,
332 .fb_check_var = xenfb_check_var,
333 .fb_set_par = xenfb_set_par,
233}; 334};
234 335
235static irqreturn_t xenfb_event_handler(int rq, void *dev_id) 336static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
@@ -258,6 +359,8 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
258{ 359{
259 struct xenfb_info *info; 360 struct xenfb_info *info;
260 struct fb_info *fb_info; 361 struct fb_info *fb_info;
362 int fb_size;
363 int val;
261 int ret; 364 int ret;
262 365
263 info = kzalloc(sizeof(*info), GFP_KERNEL); 366 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -265,18 +368,35 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
265 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); 368 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
266 return -ENOMEM; 369 return -ENOMEM;
267 } 370 }
371
372 /* Limit kernel param videoram amount to what is in xenstore */
373 if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
374 if (val < video[KPARAM_MEM])
375 video[KPARAM_MEM] = val;
376 }
377
378 /* If requested res does not fit in available memory, use default */
379 fb_size = video[KPARAM_MEM] * 1024 * 1024;
380 if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH / 8
381 > fb_size) {
382 video[KPARAM_WIDTH] = XENFB_WIDTH;
383 video[KPARAM_HEIGHT] = XENFB_HEIGHT;
384 fb_size = XENFB_DEFAULT_FB_LEN;
385 }
386
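To make the fallback arithmetic concrete: the defaults of 800x600 at XENFB_DEPTH (32 bpp) need 800 * 600 * 32 / 8 = 1,920,000 bytes, comfortably inside the default 2 MB (2,097,152 bytes), whereas a video=2,1280,1024 request would need 5,242,880 bytes and is therefore knocked back to the defaults here.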
268 dev->dev.driver_data = info; 387 dev->dev.driver_data = info;
269 info->xbdev = dev; 388 info->xbdev = dev;
270 info->irq = -1; 389 info->irq = -1;
271 info->x1 = info->y1 = INT_MAX; 390 info->x1 = info->y1 = INT_MAX;
272 spin_lock_init(&info->dirty_lock); 391 spin_lock_init(&info->dirty_lock);
392 spin_lock_init(&info->resize_lock);
273 393
274 info->fb = vmalloc(xenfb_mem_len); 394 info->fb = vmalloc(fb_size);
275 if (info->fb == NULL) 395 if (info->fb == NULL)
276 goto error_nomem; 396 goto error_nomem;
277 memset(info->fb, 0, xenfb_mem_len); 397 memset(info->fb, 0, fb_size);
278 398
279 info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT; 399 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
280 400
281 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages); 401 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
282 if (!info->mfns) 402 if (!info->mfns)
@@ -287,8 +407,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
287 if (!info->page) 407 if (!info->page)
288 goto error_nomem; 408 goto error_nomem;
289 409
290 xenfb_init_shared_page(info);
291
292 /* abusing framebuffer_alloc() to allocate pseudo_palette */ 410 /* abusing framebuffer_alloc() to allocate pseudo_palette */
293 fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL); 411 fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
294 if (fb_info == NULL) 412 if (fb_info == NULL)
@@ -301,9 +419,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
301 fb_info->screen_base = info->fb; 419 fb_info->screen_base = info->fb;
302 420
303 fb_info->fbops = &xenfb_fb_ops; 421 fb_info->fbops = &xenfb_fb_ops;
304 fb_info->var.xres_virtual = fb_info->var.xres = info->page->width; 422 fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
305 fb_info->var.yres_virtual = fb_info->var.yres = info->page->height; 423 fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
306 fb_info->var.bits_per_pixel = info->page->depth; 424 fb_info->var.bits_per_pixel = XENFB_DEPTH;
307 425
308 fb_info->var.red = (struct fb_bitfield){16, 8, 0}; 426 fb_info->var.red = (struct fb_bitfield){16, 8, 0};
309 fb_info->var.green = (struct fb_bitfield){8, 8, 0}; 427 fb_info->var.green = (struct fb_bitfield){8, 8, 0};
@@ -315,9 +433,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
315 fb_info->var.vmode = FB_VMODE_NONINTERLACED; 433 fb_info->var.vmode = FB_VMODE_NONINTERLACED;
316 434
317 fb_info->fix.visual = FB_VISUAL_TRUECOLOR; 435 fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
318 fb_info->fix.line_length = info->page->line_length; 436 fb_info->fix.line_length = fb_info->var.xres * XENFB_DEPTH / 8;
319 fb_info->fix.smem_start = 0; 437 fb_info->fix.smem_start = 0;
320 fb_info->fix.smem_len = xenfb_mem_len; 438 fb_info->fix.smem_len = fb_size;
321 strcpy(fb_info->fix.id, "xen"); 439 strcpy(fb_info->fix.id, "xen");
322 fb_info->fix.type = FB_TYPE_PACKED_PIXELS; 440 fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
323 fb_info->fix.accel = FB_ACCEL_NONE; 441 fb_info->fix.accel = FB_ACCEL_NONE;
@@ -334,6 +452,8 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
334 fb_info->fbdefio = &xenfb_defio; 452 fb_info->fbdefio = &xenfb_defio;
335 fb_deferred_io_init(fb_info); 453 fb_deferred_io_init(fb_info);
336 454
455 xenfb_init_shared_page(info, fb_info);
456
337 ret = register_framebuffer(fb_info); 457 ret = register_framebuffer(fb_info);
338 if (ret) { 458 if (ret) {
339 fb_deferred_io_cleanup(fb_info); 459 fb_deferred_io_cleanup(fb_info);
@@ -348,6 +468,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
348 if (ret < 0) 468 if (ret < 0)
349 goto error; 469 goto error;
350 470
471 xenfb_make_preferred_console();
351 return 0; 472 return 0;
352 473
353 error_nomem: 474 error_nomem:
@@ -358,12 +479,34 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
358 return ret; 479 return ret;
359} 480}
360 481
482static __devinit void
483xenfb_make_preferred_console(void)
484{
485 struct console *c;
486
487 if (console_set_on_cmdline)
488 return;
489
490 acquire_console_sem();
491 for (c = console_drivers; c; c = c->next) {
492 if (!strcmp(c->name, "tty") && c->index == 0)
493 break;
494 }
495 release_console_sem();
496 if (c) {
497 unregister_console(c);
498 c->flags |= CON_CONSDEV;
499 c->flags &= ~CON_PRINTBUFFER; /* don't print again */
500 register_console(c);
501 }
502}
503
361static int xenfb_resume(struct xenbus_device *dev) 504static int xenfb_resume(struct xenbus_device *dev)
362{ 505{
363 struct xenfb_info *info = dev->dev.driver_data; 506 struct xenfb_info *info = dev->dev.driver_data;
364 507
365 xenfb_disconnect_backend(info); 508 xenfb_disconnect_backend(info);
366 xenfb_init_shared_page(info); 509 xenfb_init_shared_page(info, info->fb_info);
367 return xenfb_connect_backend(dev, info); 510 return xenfb_connect_backend(dev, info);
368} 511}
369 512
@@ -391,20 +534,23 @@ static unsigned long vmalloc_to_mfn(void *address)
391 return pfn_to_mfn(vmalloc_to_pfn(address)); 534 return pfn_to_mfn(vmalloc_to_pfn(address));
392} 535}
393 536
394static void xenfb_init_shared_page(struct xenfb_info *info) 537static void xenfb_init_shared_page(struct xenfb_info *info,
538 struct fb_info *fb_info)
395{ 539{
396 int i; 540 int i;
541 int epd = PAGE_SIZE / sizeof(info->mfns[0]);
397 542
398 for (i = 0; i < info->nr_pages; i++) 543 for (i = 0; i < info->nr_pages; i++)
399 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE); 544 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
400 545
401 info->page->pd[0] = vmalloc_to_mfn(info->mfns); 546 for (i = 0; i * epd < info->nr_pages; i++)
402 info->page->pd[1] = 0; 547 info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
403 info->page->width = XENFB_WIDTH; 548
404 info->page->height = XENFB_HEIGHT; 549 info->page->width = fb_info->var.xres;
405 info->page->depth = XENFB_DEPTH; 550 info->page->height = fb_info->var.yres;
406 info->page->line_length = (info->page->depth / 8) * info->page->width; 551 info->page->depth = fb_info->var.bits_per_pixel;
407 info->page->mem_length = xenfb_mem_len; 552 info->page->line_length = fb_info->fix.line_length;
553 info->page->mem_length = fb_info->fix.smem_len;
408 info->page->in_cons = info->page->in_prod = 0; 554 info->page->in_cons = info->page->in_prod = 0;
409 info->page->out_cons = info->page->out_prod = 0; 555 info->page->out_cons = info->page->out_prod = 0;
410} 556}
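The page-directory loop is what makes larger framebuffers possible: on a 64-bit guest, epd = PAGE_SIZE / sizeof(unsigned long) = 4096 / 8 = 512 mfns per directory page, so the default 2 MB framebuffer (512 pages) is still described by a single pd entry, exactly what the old hard-coded pd[0]/pd[1] initialisation covered, while each further pd slot adds another 2 MB of reach (4 MB on 32-bit, where epd is 1024).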
@@ -504,6 +650,11 @@ InitWait:
504 val = 0; 650 val = 0;
505 if (val) 651 if (val)
506 info->update_wanted = 1; 652 info->update_wanted = 1;
653
654 if (xenbus_scanf(XBT_NIL, dev->otherend,
655 "feature-resize", "%d", &val) < 0)
656 val = 0;
657 info->feature_resize = val;
507 break; 658 break;
508 659
509 case XenbusStateClosing: 660 case XenbusStateClosing:
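The feature-resize read follows the usual xenbus negotiation pattern, alongside request-update in the InitWait handler above: the flag is read from the backend's side of the store (dev->otherend), and a failed read is treated as 0, so older backends that never wrote the node simply leave the frontend in fixed-mode operation.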
@@ -547,4 +698,6 @@ static void __exit xenfb_cleanup(void)
547module_init(xenfb_init); 698module_init(xenfb_init);
548module_exit(xenfb_cleanup); 699module_exit(xenfb_cleanup);
549 700
701MODULE_DESCRIPTION("Xen virtual framebuffer device frontend");
550MODULE_LICENSE("GPL"); 702MODULE_LICENSE("GPL");
703MODULE_ALIAS("xen:vfb");
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 37af04f1ffd9..363286c54290 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,4 @@
1obj-y += grant-table.o features.o events.o 1obj-y += grant-table.o features.o events.o manage.o
2obj-y += xenbus/ 2obj-y += xenbus/
3obj-$(CONFIG_XEN_XENCOMM) += xencomm.o 3obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
4obj-$(CONFIG_XEN_BALLOON) += balloon.o 4obj-$(CONFIG_XEN_BALLOON) += balloon.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ab25ba6cbbb9..591bc29b55f5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -225,7 +225,7 @@ static int increase_reservation(unsigned long nr_pages)
225 page = balloon_next_page(page); 225 page = balloon_next_page(page);
226 } 226 }
227 227
228 reservation.extent_start = (unsigned long)frame_list; 228 set_xen_guest_handle(reservation.extent_start, frame_list);
229 reservation.nr_extents = nr_pages; 229 reservation.nr_extents = nr_pages;
230 rc = HYPERVISOR_memory_op( 230 rc = HYPERVISOR_memory_op(
231 XENMEM_populate_physmap, &reservation); 231 XENMEM_populate_physmap, &reservation);
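The set_xen_guest_handle() conversions in this file are ABI hygiene rather than behaviour changes: a guest handle is a typed wrapper around the pointer passed to the hypervisor, instead of a value laundered through unsigned long. Conceptually it reduces to something like the following (a sketch of the idea only; the real definition lives in the per-architecture Xen interface headers):

/* Illustrative only: a typed handle makes the hypercall ABI explicit
 * and lets the compiler catch mismatched extent-array types. */
typedef struct { unsigned long *p; } xen_ulong_handle;

#define set_handle(hnd, val) ((hnd).p = (val))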
@@ -321,7 +321,7 @@ static int decrease_reservation(unsigned long nr_pages)
321 balloon_append(pfn_to_page(pfn)); 321 balloon_append(pfn_to_page(pfn));
322 } 322 }
323 323
324 reservation.extent_start = (unsigned long)frame_list; 324 set_xen_guest_handle(reservation.extent_start, frame_list);
325 reservation.nr_extents = nr_pages; 325 reservation.nr_extents = nr_pages;
326 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); 326 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
327 BUG_ON(ret != nr_pages); 327 BUG_ON(ret != nr_pages);
@@ -368,7 +368,7 @@ static void balloon_process(struct work_struct *work)
368} 368}
369 369
370/* Resets the Xen limit, sets new target, and kicks off processing. */ 370/* Resets the Xen limit, sets new target, and kicks off processing. */
371void balloon_set_new_target(unsigned long target) 371static void balloon_set_new_target(unsigned long target)
372{ 372{
373 /* No need for lock. Not read-modify-write updates. */ 373 /* No need for lock. Not read-modify-write updates. */
374 balloon_stats.hard_limit = ~0UL; 374 balloon_stats.hard_limit = ~0UL;
@@ -483,7 +483,7 @@ static int dealloc_pte_fn(
483 .extent_order = 0, 483 .extent_order = 0,
484 .domid = DOMID_SELF 484 .domid = DOMID_SELF
485 }; 485 };
486 reservation.extent_start = (unsigned long)&mfn; 486 set_xen_guest_handle(reservation.extent_start, &mfn);
487 set_pte_at(&init_mm, addr, pte, __pte_ma(0ull)); 487 set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
488 set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); 488 set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
489 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); 489 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
@@ -519,7 +519,7 @@ static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
519 .extent_order = 0, 519 .extent_order = 0,
520 .domid = DOMID_SELF 520 .domid = DOMID_SELF
521 }; 521 };
522 reservation.extent_start = (unsigned long)&gmfn; 522 set_xen_guest_handle(reservation.extent_start, &gmfn);
523 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, 523 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
524 &reservation); 524 &reservation);
525 if (ret == 1) 525 if (ret == 1)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 76e5b7386af9..332dd63750a0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -355,7 +355,7 @@ static void unbind_from_irq(unsigned int irq)
355 355
356 spin_lock(&irq_mapping_update_lock); 356 spin_lock(&irq_mapping_update_lock);
357 357
358 if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) { 358 if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
359 close.port = evtchn; 359 close.port = evtchn;
360 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 360 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
361 BUG(); 361 BUG();
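Swapping the operand order here is a real bug fix, not churn: && short-circuits, so the old form skipped the --irq_bindcount[irq] decrement whenever the event channel was invalid, and the count could never fall back to zero. A two-line reminder of the hazard:

int n = 1, valid = 0;
if (valid && --n == 0) { }  /* decrement skipped: n stays 1 */
if (--n == 0 && valid) { }  /* decrement always runs: n is now 0 */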
@@ -375,7 +375,7 @@ static void unbind_from_irq(unsigned int irq)
375 evtchn_to_irq[evtchn] = -1; 375 evtchn_to_irq[evtchn] = -1;
376 irq_info[irq] = IRQ_UNBOUND; 376 irq_info[irq] = IRQ_UNBOUND;
377 377
378 dynamic_irq_init(irq); 378 dynamic_irq_cleanup(irq);
379 } 379 }
380 380
381 spin_unlock(&irq_mapping_update_lock); 381 spin_unlock(&irq_mapping_update_lock);
@@ -557,6 +557,33 @@ out:
557 put_cpu(); 557 put_cpu();
558} 558}
559 559
560/* Rebind a new event channel to an existing irq. */
561void rebind_evtchn_irq(int evtchn, int irq)
562{
563 /* Make sure the irq is masked, since the new event channel
564 will also be masked. */
565 disable_irq(irq);
566
567 spin_lock(&irq_mapping_update_lock);
568
569 /* After resume the irq<->evtchn mappings are all cleared out */
570 BUG_ON(evtchn_to_irq[evtchn] != -1);
571 /* Expect irq to have been bound before,
572 so the bindcount should be non-0 */
573 BUG_ON(irq_bindcount[irq] == 0);
574
575 evtchn_to_irq[evtchn] = irq;
576 irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
577
578 spin_unlock(&irq_mapping_update_lock);
579
580 /* new event channels are always bound to cpu 0 */
581 irq_set_affinity(irq, cpumask_of_cpu(0));
582
583 /* Unmask the event channel. */
584 enable_irq(irq);
585}
586
560/* Rebind an evtchn so that it gets delivered to a specific cpu */ 587/* Rebind an evtchn so that it gets delivered to a specific cpu */
561static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 588static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
562{ 589{
@@ -647,6 +674,89 @@ static int retrigger_dynirq(unsigned int irq)
647 return ret; 674 return ret;
648} 675}
649 676
677static void restore_cpu_virqs(unsigned int cpu)
678{
679 struct evtchn_bind_virq bind_virq;
680 int virq, irq, evtchn;
681
682 for (virq = 0; virq < NR_VIRQS; virq++) {
683 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
684 continue;
685
686 BUG_ON(irq_info[irq].type != IRQT_VIRQ);
687 BUG_ON(irq_info[irq].index != virq);
688
689 /* Get a new binding from Xen. */
690 bind_virq.virq = virq;
691 bind_virq.vcpu = cpu;
692 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
693 &bind_virq) != 0)
694 BUG();
695 evtchn = bind_virq.port;
696
697 /* Record the new mapping. */
698 evtchn_to_irq[evtchn] = irq;
699 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
700 bind_evtchn_to_cpu(evtchn, cpu);
701
702 /* Ready for use. */
703 unmask_evtchn(evtchn);
704 }
705}
706
707static void restore_cpu_ipis(unsigned int cpu)
708{
709 struct evtchn_bind_ipi bind_ipi;
710 int ipi, irq, evtchn;
711
712 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
713 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
714 continue;
715
716 BUG_ON(irq_info[irq].type != IRQT_IPI);
717 BUG_ON(irq_info[irq].index != ipi);
718
719 /* Get a new binding from Xen. */
720 bind_ipi.vcpu = cpu;
721 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
722 &bind_ipi) != 0)
723 BUG();
724 evtchn = bind_ipi.port;
725
726 /* Record the new mapping. */
727 evtchn_to_irq[evtchn] = irq;
728 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
729 bind_evtchn_to_cpu(evtchn, cpu);
730
731 /* Ready for use. */
732 unmask_evtchn(evtchn);
733
734 }
735}
736
737void xen_irq_resume(void)
738{
739 unsigned int cpu, irq, evtchn;
740
741 init_evtchn_cpu_bindings();
742
743 /* New event-channel space is not 'live' yet. */
744 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
745 mask_evtchn(evtchn);
746
747 /* No IRQ <-> event-channel mappings. */
748 for (irq = 0; irq < NR_IRQS; irq++)
749 irq_info[irq].evtchn = 0; /* zap event-channel binding */
750
751 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
752 evtchn_to_irq[evtchn] = -1;
753
754 for_each_possible_cpu(cpu) {
755 restore_cpu_virqs(cpu);
756 restore_cpu_ipis(cpu);
757 }
758}
759
650static struct irq_chip xen_dynamic_chip __read_mostly = { 760static struct irq_chip xen_dynamic_chip __read_mostly = {
651 .name = "xen-dyn", 761 .name = "xen-dyn",
652 .mask = disable_dynirq, 762 .mask = disable_dynirq,
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 52b6b41b909d..e9e11168616a 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -471,14 +471,14 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
471 return 0; 471 return 0;
472} 472}
473 473
474static int gnttab_resume(void) 474int gnttab_resume(void)
475{ 475{
476 if (max_nr_grant_frames() < nr_grant_frames) 476 if (max_nr_grant_frames() < nr_grant_frames)
477 return -ENOSYS; 477 return -ENOSYS;
478 return gnttab_map(0, nr_grant_frames - 1); 478 return gnttab_map(0, nr_grant_frames - 1);
479} 479}
480 480
481static int gnttab_suspend(void) 481int gnttab_suspend(void)
482{ 482{
483 arch_gnttab_unmap_shared(shared, nr_grant_frames); 483 arch_gnttab_unmap_shared(shared, nr_grant_frames);
484 return 0; 484 return 0;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
new file mode 100644
index 000000000000..5b546e365f00
--- /dev/null
+++ b/drivers/xen/manage.c
@@ -0,0 +1,252 @@
1/*
 2 * Handle external requests for shutdown, reboot and sysrq
3 */
4#include <linux/kernel.h>
5#include <linux/err.h>
6#include <linux/reboot.h>
7#include <linux/sysrq.h>
8#include <linux/stop_machine.h>
9#include <linux/freezer.h>
10
11#include <xen/xenbus.h>
12#include <xen/grant_table.h>
13#include <xen/events.h>
14#include <xen/hvc-console.h>
15#include <xen/xen-ops.h>
16
17#include <asm/xen/hypercall.h>
18#include <asm/xen/page.h>
19
20enum shutdown_state {
21 SHUTDOWN_INVALID = -1,
22 SHUTDOWN_POWEROFF = 0,
23 SHUTDOWN_SUSPEND = 2,
24 /* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
25 report a crash, not be instructed to crash!
26 HALT is the same as POWEROFF, as far as we're concerned. The tools use
27 the distinction when we return the reason code to them. */
28 SHUTDOWN_HALT = 4,
29};
30
31/* Ignore multiple shutdown requests. */
32static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
33
34#ifdef CONFIG_PM_SLEEP
35static int xen_suspend(void *data)
36{
37 int *cancelled = data;
38 int err;
39
40 BUG_ON(!irqs_disabled());
41
42 load_cr3(swapper_pg_dir);
43
44 err = device_power_down(PMSG_SUSPEND);
45 if (err) {
46 printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
47 err);
48 return err;
49 }
50
51 xen_mm_pin_all();
52 gnttab_suspend();
53 xen_pre_suspend();
54
55 /*
56 * This hypercall returns 1 if suspend was cancelled
57 * or the domain was merely checkpointed, and 0 if it
58 * is resuming in a new domain.
59 */
60 *cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
61
62 xen_post_suspend(*cancelled);
63 gnttab_resume();
64 xen_mm_unpin_all();
65
66 device_power_up();
67
68 if (!*cancelled) {
69 xen_irq_resume();
70 xen_console_resume();
71 }
72
73 return 0;
74}
75
76static void do_suspend(void)
77{
78 int err;
79 int cancelled = 1;
80
81 shutting_down = SHUTDOWN_SUSPEND;
82
83#ifdef CONFIG_PREEMPT
84 /* If the kernel is preemptible, we need to freeze all the processes
85 to prevent them from being in the middle of a pagetable update
86 during suspend. */
87 err = freeze_processes();
88 if (err) {
89 printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
90 return;
91 }
92#endif
93
94 err = device_suspend(PMSG_SUSPEND);
95 if (err) {
96 printk(KERN_ERR "xen suspend: device_suspend %d\n", err);
97 goto out;
98 }
99
100 printk("suspending xenbus...\n");
101 /* XXX use normal device tree? */
102 xenbus_suspend();
103
104 err = stop_machine_run(xen_suspend, &cancelled, 0);
105 if (err) {
106 printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
107 goto out;
108 }
109
110 if (!cancelled)
111 xenbus_resume();
112 else
113 xenbus_suspend_cancel();
114
115 device_resume();
116
117 /* Make sure timer events get retriggered on all CPUs */
118 clock_was_set();
119out:
120#ifdef CONFIG_PREEMPT
121 thaw_processes();
122#endif
123 shutting_down = SHUTDOWN_INVALID;
124}
125#endif /* CONFIG_PM_SLEEP */
126
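Running xen_suspend() under stop_machine_run() is what makes its BUG_ON(!irqs_disabled()) hold: the callback runs on one CPU with every other CPU parked and interrupts off, so the HYPERVISOR_suspend hypercall observes a quiescent machine. Note also that the device_power_down()/device_power_up() pair brackets only that frozen window, while the heavier device_suspend() happens earlier in do_suspend().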
127static void shutdown_handler(struct xenbus_watch *watch,
128 const char **vec, unsigned int len)
129{
130 char *str;
131 struct xenbus_transaction xbt;
132 int err;
133
134 if (shutting_down != SHUTDOWN_INVALID)
135 return;
136
137 again:
138 err = xenbus_transaction_start(&xbt);
139 if (err)
140 return;
141
142 str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
143 /* Ignore read errors and empty reads. */
144 if (XENBUS_IS_ERR_READ(str)) {
145 xenbus_transaction_end(xbt, 1);
146 return;
147 }
148
149 xenbus_write(xbt, "control", "shutdown", "");
150
151 err = xenbus_transaction_end(xbt, 0);
152 if (err == -EAGAIN) {
153 kfree(str);
154 goto again;
155 }
156
157 if (strcmp(str, "poweroff") == 0 ||
158 strcmp(str, "halt") == 0) {
159 shutting_down = SHUTDOWN_POWEROFF;
160 orderly_poweroff(false);
161 } else if (strcmp(str, "reboot") == 0) {
162 shutting_down = SHUTDOWN_POWEROFF; /* ? */
163 ctrl_alt_del();
164#ifdef CONFIG_PM_SLEEP
165 } else if (strcmp(str, "suspend") == 0) {
166 do_suspend();
167#endif
168 } else {
169 printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
170 shutting_down = SHUTDOWN_INVALID;
171 }
172
173 kfree(str);
174}
175
176static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
177 unsigned int len)
178{
179 char sysrq_key = '\0';
180 struct xenbus_transaction xbt;
181 int err;
182
183 again:
184 err = xenbus_transaction_start(&xbt);
185 if (err)
186 return;
187 if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
188 printk(KERN_ERR "Unable to read sysrq code in "
189 "control/sysrq\n");
190 xenbus_transaction_end(xbt, 1);
191 return;
192 }
193
194 if (sysrq_key != '\0')
195 xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
196
197 err = xenbus_transaction_end(xbt, 0);
198 if (err == -EAGAIN)
199 goto again;
200
201 if (sysrq_key != '\0')
202 handle_sysrq(sysrq_key, NULL);
203}
204
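Both watch handlers use the standard xenstore transaction idiom: start a transaction, read and write under it, then commit, retrying from the top when the commit returns -EAGAIN because another writer raced it. Stripped to its skeleton (error handling elided, names as used in this file):

struct xenbus_transaction xbt;
int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return;
	/* reads and writes against xbt go here */
	err = xenbus_transaction_end(xbt, 0);	/* 0 = commit, 1 = abort */
	if (err == -EAGAIN)
		goto again;	/* lost the race: redo the whole transaction */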
205static struct xenbus_watch shutdown_watch = {
206 .node = "control/shutdown",
207 .callback = shutdown_handler
208};
209
210static struct xenbus_watch sysrq_watch = {
211 .node = "control/sysrq",
212 .callback = sysrq_handler
213};
214
215static int setup_shutdown_watcher(void)
216{
217 int err;
218
219 err = register_xenbus_watch(&shutdown_watch);
220 if (err) {
221 printk(KERN_ERR "Failed to set shutdown watcher\n");
222 return err;
223 }
224
225 err = register_xenbus_watch(&sysrq_watch);
226 if (err) {
227 printk(KERN_ERR "Failed to set sysrq watcher\n");
228 return err;
229 }
230
231 return 0;
232}
233
234static int shutdown_event(struct notifier_block *notifier,
235 unsigned long event,
236 void *data)
237{
238 setup_shutdown_watcher();
239 return NOTIFY_DONE;
240}
241
242static int __init setup_shutdown_event(void)
243{
244 static struct notifier_block xenstore_notifier = {
245 .notifier_call = shutdown_event
246 };
247 register_xenstore_notifier(&xenstore_notifier);
248
249 return 0;
250}
251
252subsys_initcall(setup_shutdown_event);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 0f86b0ff7879..9678b3e98c63 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -117,7 +117,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev,
117 char *path; 117 char *path;
118 118
119 va_start(ap, pathfmt); 119 va_start(ap, pathfmt);
120 path = kvasprintf(GFP_KERNEL, pathfmt, ap); 120 path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
121 va_end(ap); 121 va_end(ap);
122 122
123 if (!path) { 123 if (!path) {
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 6efbe3f29ca5..090c61ee8fd0 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -203,7 +203,6 @@ int xb_read(void *data, unsigned len)
203int xb_init_comms(void) 203int xb_init_comms(void)
204{ 204{
205 struct xenstore_domain_interface *intf = xen_store_interface; 205 struct xenstore_domain_interface *intf = xen_store_interface;
206 int err;
207 206
208 if (intf->req_prod != intf->req_cons) 207 if (intf->req_prod != intf->req_cons)
209 printk(KERN_ERR "XENBUS request ring is not quiescent " 208 printk(KERN_ERR "XENBUS request ring is not quiescent "
@@ -216,18 +215,20 @@ int xb_init_comms(void)
216 intf->rsp_cons = intf->rsp_prod; 215 intf->rsp_cons = intf->rsp_prod;
217 } 216 }
218 217
219 if (xenbus_irq) 218 if (xenbus_irq) {
220 unbind_from_irqhandler(xenbus_irq, &xb_waitq); 219 /* Already have an irq; assume we're resuming */
220 rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
221 } else {
222 int err;
223 err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
224 0, "xenbus", &xb_waitq);
225 if (err <= 0) {
226 printk(KERN_ERR "XENBUS request irq failed %i\n", err);
227 return err;
228 }
221 229
222 err = bind_evtchn_to_irqhandler( 230 xenbus_irq = err;
223 xen_store_evtchn, wake_waiting,
224 0, "xenbus", &xb_waitq);
225 if (err <= 0) {
226 printk(KERN_ERR "XENBUS request irq failed %i\n", err);
227 return err;
228 } 231 }
229 232
230 xenbus_irq = err;
231
232 return 0; 233 return 0;
233} 234}
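This is the consumer of rebind_evtchn_irq() added to events.c above: across save/restore the Linux irq and its handler survive, but the event channel number does not, so on resume xb_init_comms() re-points the existing xenbus irq at the fresh xen_store_evtchn instead of unbinding and rebinding the handler as the old code did.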
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 227d53b12a5c..7f2f91c0e11d 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -283,9 +283,9 @@ static char *join(const char *dir, const char *name)
283 char *buffer; 283 char *buffer;
284 284
285 if (strlen(name) == 0) 285 if (strlen(name) == 0)
286 buffer = kasprintf(GFP_KERNEL, "%s", dir); 286 buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
287 else 287 else
288 buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name); 288 buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
289 return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; 289 return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
290} 290}
291 291
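The GFP_KERNEL to GFP_NOIO | __GFP_HIGH conversions in this file are a deadlock precaution: xenstore traffic can sit on the path that completes block I/O (a blkfront device reconnecting during resume, for example), so allocations here must not recurse into the I/O path for reclaim, and __GFP_HIGH lets them dip into reserves to keep making progress.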
@@ -297,7 +297,7 @@ static char **split(char *strings, unsigned int len, unsigned int *num)
297 *num = count_strings(strings, len); 297 *num = count_strings(strings, len);
298 298
299 /* Transfer to one big alloc for easy freeing. */ 299 /* Transfer to one big alloc for easy freeing. */
300 ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL); 300 ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
301 if (!ret) { 301 if (!ret) {
302 kfree(strings); 302 kfree(strings);
303 return ERR_PTR(-ENOMEM); 303 return ERR_PTR(-ENOMEM);
@@ -751,7 +751,7 @@ static int process_msg(void)
751 } 751 }
752 752
753 753
754 msg = kmalloc(sizeof(*msg), GFP_KERNEL); 754 msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
755 if (msg == NULL) { 755 if (msg == NULL) {
756 err = -ENOMEM; 756 err = -ENOMEM;
757 goto out; 757 goto out;
@@ -763,7 +763,7 @@ static int process_msg(void)
763 goto out; 763 goto out;
764 } 764 }
765 765
766 body = kmalloc(msg->hdr.len + 1, GFP_KERNEL); 766 body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
767 if (body == NULL) { 767 if (body == NULL) {
768 kfree(msg); 768 kfree(msg);
769 err = -ENOMEM; 769 err = -ENOMEM;